1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/module.h>
30#include <linux/pgtable.h>
31
32MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
33MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
34
35
36
37#include <linux/ioctl.h>
38#include <linux/uaccess.h>
39
40#include <linux/stat.h>
41#include <linux/slab.h>
42#include <linux/pci.h>
43#include <linux/proc_fs.h>
44#include <linux/blkdev.h>
45#include <linux/delay.h>
46#include <linux/interrupt.h>
47#include <linux/kernel.h>
48#include <linux/sched.h>
49#include <linux/reboot.h>
50#include <linux/spinlock.h>
51#include <linux/dma-mapping.h>
52
53#include <linux/timer.h>
54#include <linux/string.h>
55#include <linux/ioport.h>
56#include <linux/mutex.h>
57
58#include <asm/processor.h>
59#include <asm/io.h>
60
61#include <scsi/scsi.h>
62#include <scsi/scsi_cmnd.h>
63#include <scsi/scsi_device.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi_tcq.h>
66
67#include "dpt/dptsig.h"
68#include "dpti.h"
69
70
71
72
73
74
/* Serializes open/ioctl entry points of the management char device. */
static DEFINE_MUTEX(adpt_mutex);

/*
 * Driver signature handed to DPT/Adaptec management tools.  The two
 * processor fields are selected at compile time from the target
 * architecture; (-1, -1) marks an architecture the signature format
 * has no code for.
 */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
93
94
95
96
97
98
99
100
101
/* Protects hba_chain/hba_count updates and walks of the HBA list. */
static DEFINE_MUTEX(adpt_configuration_lock);

/* I2O system table (DMA-coherent); presumably handed to the IOPs by
 * adpt_i2o_build_sys_table() — not visible in this chunk. */
static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;		/* bus address of sys_tbl */
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;	/* singly linked list of controllers */
static int hba_count = 0;

/* sysfs class backing the /dev/dptiN control nodes */
static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif
118
/* File operations of the management char device (major DPTI_I2O_MAJOR). */
static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};
128
129
130
131
/*
 * Bookkeeping for adpt_i2o_post_wait(): one node per posted message
 * that has a sleeping waiter.  Nodes live on adpt_post_wait_queue and
 * are matched by 'id', which the firmware echoes back in the reply
 * context (see adpt_i2o_post_wait_complete()).
 */
struct adpt_i2o_post_wait_data
{
	int status;		/* reply status; -ETIMEDOUT until completion */
	u32 id;			/* 15-bit match token placed in msg[2] */
	adpt_wait_queue_head_t *wq;	/* waiter to wake on completion */
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
/* Protects adpt_post_wait_queue and adpt_post_wait_id. */
static DEFINE_SPINLOCK(adpt_post_wait_lock);
143
144
145
146
147
148
149
150static inline int dpt_dma64(adpt_hba *pHba)
151{
152 return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
153}
154
/* Upper 32 bits of a DMA address (0 when dma_addr_t is 32-bit). */
static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}
159
/* Lower 32 bits of a DMA address (simple truncation). */
static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
164
165static u8 adpt_read_blink_led(adpt_hba* host)
166{
167 if (host->FwDebugBLEDflag_P) {
168 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
169 return readb(host->FwDebugBLEDvalue_P);
170 }
171 }
172 return 0;
173}
174
175
176
177
178
179
/* PCI IDs claimed by this driver; exported for module autoloading. */
#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);
189
/*
 * Probe for all DPT/Adaptec I2O controllers and walk each through the
 * I2O bring-up sequence: activate -> build system table -> online ->
 * read/parse LCT -> register SCSI host and /dev/dptiN node.
 *
 * Controllers that fail any step are deleted from hba_chain as we go.
 * Returns the number of controllers left alive (0 on total failure).
 */
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* Search for all Adaptec I2O RAID cards. */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			/* Keep our own reference; the pci_get_device()
			 * reference is dropped on the next iteration. */
			pci_dev_get(pDev);
		}
	}

	/* IOPs are in INIT state: activate them. */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;	/* save first: delete_hba() frees pHba */
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}

	/* Active IOPs are now in HOLD state. */
rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/* Without a system table the IOPs cannot be initialized at all,
	 * so on failure shut everything down. */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* Bring each IOP online; if one fails it is removed and the
	 * system table must be rebuilt for the remaining set. */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state. */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;	/* non-fatal: no device nodes */
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;	/* start accepting commands */
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	/* Register the management char device; without it the adapters
	 * cannot be managed, so tear everything back down on failure. */
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
300
301
/*
 * Detach one controller: remove the SCSI host from the mid-layer,
 * tear down the HBA (frees pHba), then drop the final host reference.
 */
static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
	/* pHba is kfree'd inside delete_hba; only 'shost' survives. */
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}
311
312
/*
 * Send a SCSI INQUIRY to the adapter TID and build pHba->detail
 * ("Vendor ... Model ... FW ...") from the 36-byte response.
 * Best-effort: on failure a generic label is used instead.
 */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	/* DMA-coherent bounce buffer for the INQUIRY data. */
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir =0x40000000;	/* data-in transfer flag for msg[6] */

	/* 64-bit SG element needs three extra message words. */
	if (dpt_dma64(pHba))
		reqlen = 17;
	else
		reqlen = 14;

	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	/* Adaptec/DPT private SCSI_EXEC function. */
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 ;
	/* Direction, CDB length (6) and flags; exact field layout is the
	 * firmware's private message format — not visible here. */
	msg[6] = scsidir|0x20a00000| 6 ;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	/* Standard 6-byte INQUIRY CDB asking for 36 bytes. */
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;

	/* CDB occupies 4 message words; next word is the byte count. */
	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;

	*lenptr = len;
	if (dpt_dma64(pHba)) {
		/* 64-bit SG list: descriptor header + page size + element. */
		*mptr++ = (0x7C<<24)+(2<<16)+0x02;
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		/* On timeout/interrupt the firmware may still DMA into
		 * 'buf' later, so the buffer is deliberately not freed —
		 * presumably to avoid corrupting reused memory (TODO confirm). */
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);	/* product id */
		memcpy(&(pHba->detail[40]), " FW: ", 4);	/* only 4 bytes: " FW:" */
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);	/* revision */
		pHba->detail[48] = '\0';
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
406
407
408static int adpt_slave_configure(struct scsi_device * device)
409{
410 struct Scsi_Host *host = device->host;
411
412 if (host->can_queue && device->tagged_supported) {
413 scsi_change_queue_depth(device,
414 host->can_queue - 1);
415 }
416 return 0;
417}
418
/*
 * Queue one SCSI command (lock-held body of adpt_queue, see
 * DEF_SCSI_QCMD below).
 *
 * Returns 0 when the command was consumed (completed immediately or
 * handed to the firmware), SCSI_MLQUEUE_HOST_BUSY while the HBA is
 * resetting, or FAILED when no usable HBA/device state exists.
 */
static int adpt_queue_lck(struct scsi_cmnd *cmd)
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* per-device driver state */

	/*
	 * If sense data is already present for a REQUEST_SENSE, answer
	 * from that instead of sending the command to the adapter.
	 */
	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();	/* observe DPTI_STATE_RESET updates from the reset path */
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * First command for this scsi_device: look up our own device
	 * record and cache it in hostdata for subsequent commands.
	 */
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			/* No such device behind this HBA. */
			cmd->result = (DID_NO_CONNECT << 16);
			scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * While a device reset is in flight, refuse new commands so the
	 * mid-layer retries them later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}
474
/* Generate adpt_queue() as the locked wrapper around adpt_queue_lck(). */
static DEF_SCSI_QCMD(adpt_queue)
476
477static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
478 sector_t capacity, int geom[])
479{
480 int heads=-1;
481 int sectors=-1;
482 int cylinders=-1;
483
484
485
486
487 if (capacity < 0x2000 ) {
488 heads = 18;
489 sectors = 2;
490 }
491
492 else if (capacity < 0x20000) {
493 heads = 64;
494 sectors = 32;
495 }
496
497 else if (capacity < 0x40000) {
498 heads = 65;
499 sectors = 63;
500 }
501
502 else if (capacity < 0x80000) {
503 heads = 128;
504 sectors = 63;
505 }
506
507 else {
508 heads = 255;
509 sectors = 63;
510 }
511 cylinders = sector_div(capacity, heads * sectors);
512
513
514 if(sdev->type == 5) {
515 heads = 252;
516 sectors = 63;
517 cylinders = 1111;
518 }
519
520 geom[0] = heads;
521 geom[1] = sectors;
522 geom[2] = cylinders;
523
524 PDEBUG("adpt_bios_param: exit\n");
525 return 0;
526}
527
528
529static const char *adpt_info(struct Scsi_Host *host)
530{
531 adpt_hba* pHba;
532
533 pHba = (adpt_hba *) host->hostdata[0];
534 return (char *) (pHba->detail);
535}
536
/*
 * /proc/scsi show handler: dump driver version, controller detail and
 * the full channel/target/LUN device list for 'host'.
 */
static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	/* Find the HBA that owns this Scsi_Host. */
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {	/* walk the LUN chain at this target */
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}
583
584
585
586
/*
 * Map an ioctl reply-buffer pointer to a 32-bit context carried in an
 * I2O message.  On 32-bit kernels the pointer itself fits; on 64-bit
 * kernels a free slot of pHba->ioctl_reply_context[] is claimed under
 * host_lock and its index is the context.  Returns (u32)-1 when the
 * table is full.
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}
613
614
615
616
/*
 * Inverse of adpt_ioctl_to_context(): recover the reply pointer and,
 * on 64-bit kernels, release the table slot.  NOTE(review): 'context'
 * is not bounds-checked here — callers must pass only values produced
 * by adpt_ioctl_to_context().
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
628
629
630
631
632
633
/*
 * Error-handler abort: ask the firmware to abort one outstanding
 * command, identified by its request tag.  Returns SUCCESS or FAILED
 * per the SCSI EH contract.
 */
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;
	struct adpt_device* dptdevice;	/* per-device driver state */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	/* tag + 1: presumably so that 0 never reaches the firmware as a
	 * command identifier — TODO confirm against the submit path. */
	msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}
671
672
/* I2O command code for a single-device reset (used by adpt_device_reset). */
#define I2O_DEVICE_RESET 0x27
674
675
676
/*
 * Error-handler device reset: send an I2O_DEVICE_RESET for the target
 * behind 'cmd'.  DPTI_DEV_RESET is set on the device for the duration
 * so adpt_queue_lck() refuses new commands to it.
 */
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;	/* restore whatever flags were set before */
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
717
718
/* I2O command code to reset one SCSI bus (used by adpt_bus_reset). */
#define I2O_HBA_BUS_RESET 0x87
720
/*
 * Error-handler bus reset: send an I2O_HBA_BUS_RESET addressed to the
 * TID of the channel the command's device sits on.
 */
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
747
748
749static int __adpt_reset(struct scsi_cmnd* cmd)
750{
751 adpt_hba* pHba;
752 int rcode;
753 char name[32];
754
755 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
756 strncpy(name, pHba->name, sizeof(name));
757 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
758 rcode = adpt_hba_reset(pHba);
759 if(rcode == 0){
760 printk(KERN_WARNING"%s: HBA reset complete\n", name);
761 return SUCCESS;
762 } else {
763 printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
764 return FAILED;
765 }
766}
767
768static int adpt_reset(struct scsi_cmnd* cmd)
769{
770 int rc;
771
772 spin_lock_irq(cmd->device->host->host_lock);
773 rc = __adpt_reset(cmd);
774 spin_unlock_irq(cmd->device->host->host_lock);
775
776 return rc;
777}
778
779
/*
 * Re-run the full I2O bring-up on one controller: activate, rebuild
 * the system table, bring it online, then re-read and re-parse the
 * LCT.  DPTI_STATE_RESET is held for the duration so queuecommand
 * returns busy.  On ANY failure the HBA is deleted (pHba freed) —
 * callers must not touch pHba after a non-zero return.  On success
 * all outstanding commands are completed with DID_RESET.
 */
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	/* Block queuecommand while we rebuild the adapter state. */
	pHba->state |= DPTI_STATE_RESET;

	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	scsi_host_complete_all_commands(pHba->host, DID_RESET);
	return 0;
}
819
820
821
822
823
824
825
/*
 * Full driver shutdown: delete every HBA on hba_chain, then reclaim
 * any post-wait nodes left behind by timed-out adpt_i2o_post_wait()
 * calls (those are deliberately not freed at timeout — see there).
 */
static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO " This could take a few minutes if there are many devices attached\n");

	/* Delete all IOPs from the controller chain; this also frees
	 * everything each HBA owns. */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;	/* save first: delete_hba() frees pHba */
		adpt_i2o_delete_hba(pHba);
	}

	/* Sweep the orphaned post-wait bookkeeping nodes. */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}

	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
857
858static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
859{
860
861 adpt_hba* pHba = NULL;
862 adpt_hba* p = NULL;
863 ulong base_addr0_phys = 0;
864 ulong base_addr1_phys = 0;
865 u32 hba_map0_area_size = 0;
866 u32 hba_map1_area_size = 0;
867 void __iomem *base_addr_virt = NULL;
868 void __iomem *msg_addr_virt = NULL;
869 int dma64 = 0;
870
871 int raptorFlag = FALSE;
872
873 if(pci_enable_device(pDev)) {
874 return -EINVAL;
875 }
876
877 if (pci_request_regions(pDev, "dpt_i2o")) {
878 PERROR("dpti: adpt_config_hba: pci request region failed\n");
879 return -EINVAL;
880 }
881
882 pci_set_master(pDev);
883
884
885
886
887 if (sizeof(dma_addr_t) > 4 &&
888 dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
889 dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
890 dma64 = 1;
891
892 if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
893 return -EINVAL;
894
895
896 dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
897
898 base_addr0_phys = pci_resource_start(pDev,0);
899 hba_map0_area_size = pci_resource_len(pDev,0);
900
901
902 if(pDev->device == PCI_DPT_DEVICE_ID){
903 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
904
905 hba_map0_area_size = 0x400000;
906 } else {
907 if(hba_map0_area_size > 0x100000 ){
908 hba_map0_area_size = 0x100000;
909 }
910 }
911 } else {
912
913 base_addr1_phys = pci_resource_start(pDev,1);
914 hba_map1_area_size = pci_resource_len(pDev,1);
915 raptorFlag = TRUE;
916 }
917
918#if BITS_PER_LONG == 64
919
920
921
922
923
924
925
926 if (raptorFlag == TRUE) {
927 if (hba_map0_area_size > 128)
928 hba_map0_area_size = 128;
929 if (hba_map1_area_size > 524288)
930 hba_map1_area_size = 524288;
931 } else {
932 if (hba_map0_area_size > 524288)
933 hba_map0_area_size = 524288;
934 }
935#endif
936
937 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
938 if (!base_addr_virt) {
939 pci_release_regions(pDev);
940 PERROR("dpti: adpt_config_hba: io remap failed\n");
941 return -EINVAL;
942 }
943
944 if(raptorFlag == TRUE) {
945 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
946 if (!msg_addr_virt) {
947 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
948 iounmap(base_addr_virt);
949 pci_release_regions(pDev);
950 return -EINVAL;
951 }
952 } else {
953 msg_addr_virt = base_addr_virt;
954 }
955
956
957 pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
958 if (!pHba) {
959 if (msg_addr_virt != base_addr_virt)
960 iounmap(msg_addr_virt);
961 iounmap(base_addr_virt);
962 pci_release_regions(pDev);
963 return -ENOMEM;
964 }
965
966 mutex_lock(&adpt_configuration_lock);
967
968 if(hba_chain != NULL){
969 for(p = hba_chain; p->next; p = p->next);
970 p->next = pHba;
971 } else {
972 hba_chain = pHba;
973 }
974 pHba->next = NULL;
975 pHba->unit = hba_count;
976 sprintf(pHba->name, "dpti%d", hba_count);
977 hba_count++;
978
979 mutex_unlock(&adpt_configuration_lock);
980
981 pHba->pDev = pDev;
982 pHba->base_addr_phys = base_addr0_phys;
983
984
985 pHba->base_addr_virt = base_addr_virt;
986 pHba->msg_addr_virt = msg_addr_virt;
987 pHba->irq_mask = base_addr_virt+0x30;
988 pHba->post_port = base_addr_virt+0x40;
989 pHba->reply_port = base_addr_virt+0x44;
990
991 pHba->hrt = NULL;
992 pHba->lct = NULL;
993 pHba->lct_size = 0;
994 pHba->status_block = NULL;
995 pHba->post_count = 0;
996 pHba->state = DPTI_STATE_RESET;
997 pHba->pDev = pDev;
998 pHba->devices = NULL;
999 pHba->dma64 = dma64;
1000
1001
1002 spin_lock_init(&pHba->state_lock);
1003 spin_lock_init(&adpt_post_wait_lock);
1004
1005 if(raptorFlag == 0){
1006 printk(KERN_INFO "Adaptec I2O RAID controller"
1007 " %d at %p size=%x irq=%d%s\n",
1008 hba_count-1, base_addr_virt,
1009 hba_map0_area_size, pDev->irq,
1010 dma64 ? " (64-bit DMA)" : "");
1011 } else {
1012 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1013 hba_count-1, pDev->irq,
1014 dma64 ? " (64-bit DMA)" : "");
1015 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1016 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1017 }
1018
1019 if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1020 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1021 adpt_i2o_delete_hba(pHba);
1022 return -EINVAL;
1023 }
1024
1025 return 0;
1026}
1027
1028
/*
 * Unlink 'pHba' from hba_chain and free everything it owns: irq,
 * iomem mappings, PCI regions, DMA-coherent tables (HRT/LCT/status
 * block/reply pool), the i2o and adpt device lists, the sysfs node
 * and the structure itself.  When the last HBA goes away the char
 * device and the sysfs class are unregistered too.
 */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;

	mutex_lock(&adpt_configuration_lock);
	/* NOTE(review): the irq is only released when a SCSI host was
	 * allocated; if request_irq() succeeded but host allocation never
	 * happened, the irq appears to leak — confirm against callers. */
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	/* Unlink from the singly linked chain. */
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	/* Free the flat i2o_device list. */
	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	/* Free the per-channel/target LUN chains. */
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	/* Last controller gone: drop the char device and sysfs class. */
	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}
1114
1115static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
1116{
1117 struct adpt_device* d;
1118
1119 if (chan >= MAX_CHANNEL)
1120 return NULL;
1121
1122 d = pHba->channel[chan].device[id];
1123 if(!d || d->tid == 0) {
1124 return NULL;
1125 }
1126
1127
1128 if(d->scsi_lun == lun){
1129 return d;
1130 }
1131
1132
1133 for(d=d->next_lun ; d ; d = d->next_lun){
1134 if(d->scsi_lun == lun){
1135 return d;
1136 }
1137 }
1138 return NULL;
1139}
1140
1141
/*
 * Post an I2O message and sleep (interruptibly) until the matching
 * reply arrives or 'timeout' seconds elapse (0 = wait forever).
 *
 * A wait node with a 15-bit id is linked on adpt_post_wait_queue and
 * the id is embedded in msg[2]; adpt_i2o_post_wait_complete() finds
 * the node by that id.  If the caller holds pHba->host->host_lock it
 * is dropped across the sleep and retaken afterwards.
 *
 * Returns the reply status, -EOPNOTSUPP if the firmware rejected the
 * function, -ETIME if the wait expired after a successful post,
 * -ETIMEDOUT if posting itself timed out, or -ENOMEM.  In the
 * -ETIMEDOUT case the node is left on the queue (it is reclaimed in
 * adpt_i2o_sys_shutdown()).
 */
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/* Link the node and take a fresh id under the queue lock. */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;	/* ids are 15 bits, wrap around */
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	/* Tag the message so the reply can be matched back to us. */
	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				/* schedule_timeout() ran to expiry */
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		/* Posting failed: leave the node on the queue (freed later
		 * by adpt_i2o_sys_shutdown()). */
		return status;
	}

	/* Remove our node from the queue and collect the status. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
1227
1228
/*
 * Claim an inbound message frame from the post FIFO (polling for up
 * to 30s), copy 'len' bytes of 'data' into it and post it to the IOP.
 * Returns 0 or -ETIMEDOUT when no frame ever becomes available.
 */
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);	/* frame offset or EMPTY_QUEUE */
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();	/* frame contents must be visible before the post */

	writel(m, pHba->post_port);
	wmb();

	return 0;
}
1258
1259
/*
 * Reply-side half of adpt_i2o_post_wait(): look up the waiter by the
 * 15-bit id carried in 'context', store the status and wake it.  If
 * no waiter matches (e.g. it already timed out and removed its node)
 * the reply is only logged.
 */
static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;

	context &= 0x7fff;	/* upper bits are flags; id is 15 bits */

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);

	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG" Tasks in wait queue:\n");
	/* NOTE(review): this debug walk runs without adpt_post_wait_lock. */
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG" %d\n",p1->id);
	}
	return;
}
1296
/*
 * Issue an I2O ADAPTER_RESET to the IOP and wait for the 4-byte
 * status cell it DMAs back.  The adapter is quiesced first unless
 * this is the initial bring-up.  Returns 0, -ETIMEDOUT or -ENOMEM.
 */
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	/* first-time reset is quick */
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	/* Poll for a free inbound message frame. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	/* 4-byte status cell the IOP writes its reset result into. */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);	/* return the claimed frame */
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	/* Wait for the IOP to write a non-zero code into the cell. */
	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* The 4-byte status buffer is deliberately leaked:
			 * the IOP may still DMA into it at any time, so it
			 * cannot safely be freed here. */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 ) {	/* reset accepted, in progress */
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		/* Wait for a message frame to reappear and hand it back
		 * with a NOP so the IOP can complete its reset. */
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* Same deliberate leak of the status buffer
				 * as above. */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);

		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	/* Delay presumably for the adapter's debug UART to settle in
	 * debug builds — TODO confirm. */
	adpt_delay(20000);
#endif
	return 0;
}
1398
1399
1400static int adpt_i2o_parse_lct(adpt_hba* pHba)
1401{
1402 int i;
1403 int max;
1404 int tid;
1405 struct i2o_device *d;
1406 i2o_lct *lct = pHba->lct;
1407 u8 bus_no = 0;
1408 s16 scsi_id;
1409 u64 scsi_lun;
1410 u32 buf[10];
1411 struct adpt_device* pDev;
1412
1413 if (lct == NULL) {
1414 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1415 return -1;
1416 }
1417
1418 max = lct->table_size;
1419 max -= 3;
1420 max /= 9;
1421
1422 for(i=0;i<max;i++) {
1423 if( lct->lct_entry[i].user_tid != 0xfff){
1424
1425
1426
1427
1428
1429
1430
1431 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1432 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1433 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1434 continue;
1435 }
1436 tid = lct->lct_entry[i].tid;
1437
1438 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1439 continue;
1440 }
1441 bus_no = buf[0]>>16;
1442 scsi_id = buf[1];
1443 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1444 if(bus_no >= MAX_CHANNEL) {
1445 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1446 continue;
1447 }
1448 if (scsi_id >= MAX_ID){
1449 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1450 continue;
1451 }
1452 if(bus_no > pHba->top_scsi_channel){
1453 pHba->top_scsi_channel = bus_no;
1454 }
1455 if(scsi_id > pHba->top_scsi_id){
1456 pHba->top_scsi_id = scsi_id;
1457 }
1458 if(scsi_lun > pHba->top_scsi_lun){
1459 pHba->top_scsi_lun = scsi_lun;
1460 }
1461 continue;
1462 }
1463 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1464 if(d==NULL)
1465 {
1466 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1467 return -ENOMEM;
1468 }
1469
1470 d->controller = pHba;
1471 d->next = NULL;
1472
1473 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1474
1475 d->flags = 0;
1476 tid = d->lct_data.tid;
1477 adpt_i2o_report_hba_unit(pHba, d);
1478 adpt_i2o_install_device(pHba, d);
1479 }
1480 bus_no = 0;
1481 for(d = pHba->devices; d ; d = d->next) {
1482 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1483 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1484 tid = d->lct_data.tid;
1485
1486
1487 if(bus_no > pHba->top_scsi_channel){
1488 pHba->top_scsi_channel = bus_no;
1489 }
1490 pHba->channel[bus_no].type = d->lct_data.class_id;
1491 pHba->channel[bus_no].tid = tid;
1492 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1493 {
1494 pHba->channel[bus_no].scsi_id = buf[1];
1495 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1496 }
1497
1498 bus_no++;
1499 if(bus_no >= MAX_CHANNEL) {
1500 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1501 break;
1502 }
1503 }
1504 }
1505
1506
1507 for(d = pHba->devices; d ; d = d->next) {
1508 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1509 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1510 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1511
1512 tid = d->lct_data.tid;
1513 scsi_id = -1;
1514
1515 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1516 bus_no = buf[0]>>16;
1517 scsi_id = buf[1];
1518 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1519 if(bus_no >= MAX_CHANNEL) {
1520 continue;
1521 }
1522 if (scsi_id >= MAX_ID) {
1523 continue;
1524 }
1525 if( pHba->channel[bus_no].device[scsi_id] == NULL){
1526 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1527 if(pDev == NULL) {
1528 return -ENOMEM;
1529 }
1530 pHba->channel[bus_no].device[scsi_id] = pDev;
1531 } else {
1532 for( pDev = pHba->channel[bus_no].device[scsi_id];
1533 pDev->next_lun; pDev = pDev->next_lun){
1534 }
1535 pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1536 if(pDev->next_lun == NULL) {
1537 return -ENOMEM;
1538 }
1539 pDev = pDev->next_lun;
1540 }
1541 pDev->tid = tid;
1542 pDev->scsi_channel = bus_no;
1543 pDev->scsi_id = scsi_id;
1544 pDev->scsi_lun = scsi_lun;
1545 pDev->pI2o_dev = d;
1546 d->owner = pDev;
1547 pDev->type = (buf[0])&0xff;
1548 pDev->flags = (buf[0]>>8)&0xff;
1549 if(scsi_id > pHba->top_scsi_id){
1550 pHba->top_scsi_id = scsi_id;
1551 }
1552 if(scsi_lun > pHba->top_scsi_lun){
1553 pHba->top_scsi_lun = scsi_lun;
1554 }
1555 }
1556 if(scsi_id == -1){
1557 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1558 d->lct_data.identity_tag);
1559 }
1560 }
1561 }
1562 return 0;
1563}
1564
1565
1566
1567
1568
1569
1570
1571static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1572{
1573 mutex_lock(&adpt_configuration_lock);
1574 d->controller=pHba;
1575 d->owner=NULL;
1576 d->next=pHba->devices;
1577 d->prev=NULL;
1578 if (pHba->devices != NULL){
1579 pHba->devices->prev=d;
1580 }
1581 pHba->devices=d;
1582 *d->dev_name = 0;
1583
1584 mutex_unlock(&adpt_configuration_lock);
1585 return 0;
1586}
1587
1588static int adpt_open(struct inode *inode, struct file *file)
1589{
1590 int minor;
1591 adpt_hba* pHba;
1592
1593 mutex_lock(&adpt_mutex);
1594
1595
1596 minor = iminor(inode);
1597 if (minor >= hba_count) {
1598 mutex_unlock(&adpt_mutex);
1599 return -ENXIO;
1600 }
1601 mutex_lock(&adpt_configuration_lock);
1602 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1603 if (pHba->unit == minor) {
1604 break;
1605 }
1606 }
1607 if (pHba == NULL) {
1608 mutex_unlock(&adpt_configuration_lock);
1609 mutex_unlock(&adpt_mutex);
1610 return -ENXIO;
1611 }
1612
1613
1614
1615
1616
1617
1618 pHba->in_use = 1;
1619 mutex_unlock(&adpt_configuration_lock);
1620 mutex_unlock(&adpt_mutex);
1621
1622 return 0;
1623}
1624
1625static int adpt_close(struct inode *inode, struct file *file)
1626{
1627 int minor;
1628 adpt_hba* pHba;
1629
1630 minor = iminor(inode);
1631 if (minor >= hba_count) {
1632 return -ENXIO;
1633 }
1634 mutex_lock(&adpt_configuration_lock);
1635 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1636 if (pHba->unit == minor) {
1637 break;
1638 }
1639 }
1640 mutex_unlock(&adpt_configuration_lock);
1641 if (pHba == NULL) {
1642 return -ENXIO;
1643 }
1644
1645 pHba->in_use = 0;
1646
1647 return 0;
1648}
1649
1650
/*
 * Pass an arbitrary I2O message from userspace (I2OUSRCMD ioctl)
 * through to the controller.
 *
 * Layout of *arg: a message frame (word 0 holds size<<16 and the SG
 * offset nibble) immediately followed by the user's reply buffer.
 * Any simple SG elements in the message are shadowed in kernel DMA
 * buffers; outbound data is copied in before posting, inbound data is
 * copied back out afterwards.  The reply frame captured by the ISR
 * (matched via the adpt_ioctl_to_context() handle in msg[3]) is copied
 * to the user's reply buffer.
 *
 * Returns 0 on success or a negative errno.
 * NOTE(review): rcode is declared u32 but holds negative errnos
 * throughout - relies on implicit conversion at return.
 */
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void **sg_list = NULL;		/* kernel shadows of user SG buffers */
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);

	/* Word 0 of the user frame: message size (words) in the top half */
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	/* user_reply is only computed here; size is bounds-checked before
	 * the pointer is ever dereferenced below */
	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4;		/* words -> bytes */

	/* Copy in the full message frame */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	/* NOTE(review): get_user return value deliberately ignored here;
	 * a fault just leaves reply_size = 0 */
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;	/* SG list offset nibble, in words */
	msg[2] = 0x40000000;		/* mark as ioctl context for the ISR */
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if(sg_offset) {
		/* Shadow each simple SG element in a kernel DMA buffer */
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			/* 0x10000000 = "simple" SG element flag */
			if (!(sg[i].flag_count & 0x10000000 )) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;	/* byte count */
			/* Allocate a bounce buffer the controller can DMA to */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p;	/* remember for freeing */

			/* 0x04000000 = direction out: copy user data in */
			if(sg[i].flag_count & 0x04000000 ) {
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* Point the element at the kernel DMA address */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Block new SCSI commands and take the host lock while the
		 * message is posted; retried only on -ETIMEDOUT.
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy inbound SG data back out to the user's buffers.
		 * The message is re-read from userspace because sg[].addr_bus
		 * in our copy was overwritten with kernel DMA addresses. */
		u32 j;

		struct sg_simple_element* sg;
		int sg_size;

		memset(&msg, 0, MAX_MESSAGE_SIZE*4);

		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* NOTE(review): trusts that userspace did not change the
		 * frame between the two reads - TOCTOU window, though
		 * sg_count is recomputed and bounded by size */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Anything not flagged "out" (0x4000000) is data in */
			if(! (sg[j].flag_count & 0x4000000 )) {
				sg_size = sg[j].flag_count & 0xffffff;

				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy the captured reply frame to the user's reply buffer */
	if (reply_size) {
		/* Restore the user's transaction context words (2 and 3) */
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	/* On -ETIME/-EINTR the controller may still DMA into the buffers,
	 * so they are intentionally leaked rather than freed */
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}
1851
#if defined __ia64__
/* Fill in the IA-64 processor type for DPT_SYSINFO. */
static void adpt_ia64_info(sysInfo_S* si)
{
	si->processorType = PROC_IA64;
}
#endif
1861
#if defined __sparc__
/* Fill in the SPARC processor type for DPT_SYSINFO. */
static void adpt_sparc_info(sysInfo_S* si)
{
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__
/* Fill in the Alpha processor type for DPT_SYSINFO. */
static void adpt_alpha_info(sysInfo_S* si)
{
	si->processorType = PROC_ALPHA;
}
#endif
1880
#if defined __i386__

#include <uapi/asm/vm86.h>

/*
 * Map the x86 CPU family (boot_cpu_data.x86) to a DPT processor type
 * for DPT_SYSINFO.  Anything at or above family 5 reports PROC_PENTIUM.
 */
static void adpt_i386_info(sysInfo_S* si)
{
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	default:	/* CPU_586 and anything newer */
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif
1906
1907
1908
1909
1910
1911
1912
1913static int adpt_system_info(void __user *buffer)
1914{
1915 sysInfo_S si;
1916
1917 memset(&si, 0, sizeof(si));
1918
1919 si.osType = OS_LINUX;
1920 si.osMajorVersion = 0;
1921 si.osMinorVersion = 0;
1922 si.osRevision = 0;
1923 si.busType = SI_PCI_BUS;
1924 si.processorFamily = DPTI_sig.dsProcessorFamily;
1925
1926#if defined __i386__
1927 adpt_i386_info(&si);
1928#elif defined (__ia64__)
1929 adpt_ia64_info(&si);
1930#elif defined(__sparc__)
1931 adpt_sparc_info(&si);
1932#elif defined (__alpha__)
1933 adpt_alpha_info(&si);
1934#else
1935 si.processorType = 0xff ;
1936#endif
1937 if (copy_to_user(buffer, &si, sizeof(si))){
1938 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1939 return -EFAULT;
1940 }
1941
1942 return 0;
1943}
1944
/*
 * Main ioctl dispatcher for the dpti character device.
 *
 * Resolves the HBA from the minor number, waits for any in-progress
 * reset to finish, then handles the DPT management commands.
 * Returns 0 on success or a negative errno.
 * Called with adpt_mutex held (see adpt_unlocked_ioctl).
 */
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	/* Busy-wait (sleeping) until any reset in progress completes.
	 * NOTE(review): the volatile cast is a legacy idiom, not a real
	 * memory barrier. */
	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	/* Return the driver signature block */
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	/* Raw I2O message pass-through */
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	/* Controller/PCI information */
	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	/* Host system description */
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	/* Current blink-LED (fault) code, 0 if none */
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	/* Force a controller reset, under the host lock */
	case I2ORESETCMD: {
		struct Scsi_Host *shost = pHba->host;

		if (shost)
			spin_lock_irqsave(shost->host_lock, flags);
		adpt_hba_reset(pHba);
		if (shost)
			spin_unlock_irqrestore(shost->host_lock, flags);
		break;
	}
	/* Re-read the LCT and update the device lists */
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
2030
2031static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2032{
2033 struct inode *inode;
2034 long ret;
2035
2036 inode = file_inode(file);
2037
2038 mutex_lock(&adpt_mutex);
2039 ret = adpt_ioctl(inode, file, cmd, arg);
2040 mutex_unlock(&adpt_mutex);
2041
2042 return ret;
2043}
2044
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point.  All DPT commands use layouts that
 * are identical for 32- and 64-bit callers, so recognized commands are
 * forwarded to adpt_ioctl unchanged; everything else is -ENOIOCTLCMD.
 */
static long compat_adpt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	long ret = -ENOIOCTLCMD;

	mutex_lock(&adpt_mutex);

	switch (cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(file_inode(file), file, cmd, arg);
		break;
	default:
		break;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif
2077
/*
 * Interrupt handler: drain the adapter's outbound reply FIFO.
 *
 * Each reply MFA read from reply_port is translated to a virtual
 * address in the reply pool, dispatched according to its context bits
 * (0x40000000 = ioctl passthru, 0x80000000 = post-wait, otherwise a
 * SCSI command keyed by block-layer tag), then returned to the
 * adapter by writing the MFA back to reply_port.
 */
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			/* Retry once after a read barrier before giving up */
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		/* Translate the MFA (bus address) into a reply-pool virtual
		 * address; anything outside the pool should never happen */
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			/* Failed message: recover the original MFA preserved
			 * at offset 28, propagate its transaction context
			 * into the reply, and release the frame with a NOP */
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			/* Transaction context is at offset 12 of the frame */
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if(context & 0x40000000){
			/* ioctl passthru: copy the whole reply frame into
			 * the buffer registered via adpt_ioctl_to_context */
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			/* All ioctl-context replies also carry 0x80000000 */
		}
		if(context & 0x80000000){
			/* Post-wait completion: extract detailed status */
			status = readl(reply+16);
			if(status  >> 24){
				status &=  0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				/* Sanity check: a post-wait context should
				 * never map to an outstanding SCSI command */
				cmd = scsi_host_find_tag(pHba->host,
						readl(reply + 12) - 1);
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else {
			/* Normal SCSI completion: the tag (+1) was stored in
			 * the transaction context word by adpt_scsi_to_i2o */
			cmd = scsi_host_find_tag(pHba->host,
					readl(reply + 12) - 1);
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				adpt_i2o_scsi_complete(reply, cmd);
			}
		}
		/* Return the frame to the adapter's free list */
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
2183
/*
 * Build an I2O_CMD_SCSI_EXEC private message frame for a SCSI command
 * and post it to the controller.
 *
 * Frame layout (32-bit words):
 *   [0] size + SG offset    [1] function/TID  [2] unused
 *   [3] block-layer tag + 1 (transaction context, recovered in the ISR)
 *   [4] private cmd + DPT org id   [5] target TID
 *   [6] direction flags + CDB length   [7..10] CDB (16 bytes)
 *   [11] total byte count, then the SG list (64-bit form if the HBA
 *   supports 64-bit DMA).
 *
 * Returns 0 on success or the error from adpt_i2o_post_this.
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0 , sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;			/* DATA NO XFER */
	if(len) {
		/*
		 * Map the data direction to the I2O flag words:
		 * scsidir goes into the SCSI flags word (msg[6]),
		 * direction into each SG element.
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	// SGL OUT
			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			/* Not sure that the controller supports this direction */
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_ERROR <<16);
			scsi_done(cmd);
			return 	0;
		}
	}
	/* msg[0] (size/SG offset) is filled in last, once reqlen is known */
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	/* +1 so a valid tag never collides with an empty (0) context */
	msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
	/* Enable Port disabled till we can do this appropriately */
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	/* I2O_SCB_FLAG_ENABLE_DISCONNECT |
	 * I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	 * I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE */
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	/* Write SCSI command into the 16-byte CDB area (words 7..10) */
	memset(mptr, 0,  16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know the
				   total byte count */
	if (dpt_dma64(pHba)) {
		/* 64-bit SGL: chain element announcing the page size */
		reqlen = 16;		// SINGLE SGE
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;		// SINGLE SGE
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list: 0xD0000000 sets the
			 * last-element and end-of-buffer flags */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	/* Send it on it's way */
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
2305
2306
2307static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2308{
2309 struct Scsi_Host *host;
2310
2311 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2312 if (host == NULL) {
2313 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2314 return -1;
2315 }
2316 host->hostdata[0] = (unsigned long)pHba;
2317 pHba->host = host;
2318
2319 host->irq = pHba->pDev->irq;
2320
2321
2322
2323 host->io_port = 0;
2324 host->n_io_port = 0;
2325
2326 host->max_id = 16;
2327 host->max_lun = 256;
2328 host->max_channel = pHba->top_scsi_channel + 1;
2329 host->cmd_per_lun = 1;
2330 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2331 host->sg_tablesize = pHba->sg_tablesize;
2332 host->can_queue = pHba->post_fifo_size;
2333
2334 return 0;
2335}
2336
2337
/*
 * Complete a SCSI command from its I2O reply frame.
 *
 * Decodes the detailed status word (offset 16: low byte = SCSI device
 * status, next byte = HBA status) into a midlayer result, copies sense
 * data out of the reply when the device reported CHECK CONDITION, and
 * calls scsi_done().  Called from the ISR with the host lock held.
 */
static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
	/* We should look at the reply flags for NACK and other errors --
	 * hedged: only MSG_FAIL is currently inspected */
	u16 detailed_status = readl(reply+16) &0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	/* Residual: bytes requested minus bytes transferred (offset 20) */
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if(!(reply_flags & MSG_FAIL)) {
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			/* Flag a midlayer error if fewer bytes arrived than
			 * the command's declared underflow minimum */
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		/* Everything else maps to a generic DID_ERROR */
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			       hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		/* Copy over the sense data: the message carries at most 40
		 * bytes of sense, starting at offset 28 */
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);

			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);

			}
		}
	} else {
		/* In this condition we could not talk to the tid.  The card
		 * rejected the command - treat as a timeout so error
		 * handling kicks in. */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	/* Merge the SCSI device status into the low byte of the result */
	cmd->result |= (dev_status);

	scsi_done(cmd);
}
2455
2456
2457static s32 adpt_rescan(adpt_hba* pHba)
2458{
2459 s32 rcode;
2460 ulong flags = 0;
2461
2462 if(pHba->host)
2463 spin_lock_irqsave(pHba->host->host_lock, flags);
2464 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2465 goto out;
2466 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2467 goto out;
2468 rcode = 0;
2469out: if(pHba->host)
2470 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2471 return rcode;
2472}
2473
2474
/*
 * Merge a freshly fetched LCT into the existing device lists (rescan
 * path, called with the host lock held - hence GFP_ATOMIC).
 *
 * Every known adpt_device is first marked DPTI_DEV_UNSCANNED; devices
 * found again in the new LCT are marked ONLINE (and their TID updated
 * if it moved), new devices are created, and anything still UNSCANNED
 * at the end is marked OFFLINE.
 * Returns 0 on success, -1 for a missing LCT, -ENOMEM on allocation
 * failure.
 */
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10];		/* scratch for scalar-parameter queries */
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* table_size is in 32-bit words: 3-word header + 9 per entry */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	/* Mark each device as scanned/unscanned */
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		/* Only unclaimed entries (user_tid == 0xfff) are ours */
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			/* group 0x8000: DPT private device-info scalar */
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* da lun */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				/* New device: create the i2o_device and an
				 * adpt_device, then link it in */
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					/* first device at this (bus, id) */
					pDev =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* append an extra LUN to the chain */
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up it's mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					/* Known device: bring it back online
					 * if needed, refresh a moved TID */
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	/* Anything still unscanned has vanished: take it offline */
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
/*
 * Bring the IOP to a state where it can be initialized: reset it if
 * necessary until its status block reports ADAPTER_STATE_RESET, then
 * set up the outbound queue and fetch the hardware resource table.
 * Returns 0 on success, a negative value on failure.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		/* Already initialized once: only reset if it has stopped
		 * responding or is in a state that requires it */
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		/* Any of these states means the IOP is active and must be
		 * reset back to ADAPTER_STATE_RESET before (re)init */
		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		/* First activation: always start from a clean reset */
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}
2702
2703
2704
2705
2706
2707static int adpt_i2o_online_hba(adpt_hba* pHba)
2708{
2709 if (adpt_i2o_systab_send(pHba) < 0)
2710 return -1;
2711
2712
2713 if (adpt_i2o_enable_hba(pHba) < 0)
2714 return -1;
2715
2716
2717 return 0;
2718}
2719
/*
 * Post a UtilNOP message to the IOP, claiming an inbound message
 * frame first if the caller passed m == EMPTY_QUEUE.  Used to hand a
 * previously claimed frame back to the controller without doing work.
 *
 * Returns 0 on success, 2 if no frame became available within 5s.
 */
static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	/* Claim a message frame if we were not given one */
	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	/* Build the three-word NOP in the frame, then post it.  The wmb()
	 * before the post ensures the frame contents are visible to the
	 * device before the frame offset hits the post port. */
	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
	writel( 0,&msg[2]);
	wmb();

	writel(m, pHba->post_port);
	wmb();
	return 0;
}
2747
/*
 * Initialize the IOP's outbound (reply) queue: send an
 * I2O_CMD_OUTBOUND_INIT, poll the 4-byte status the IOP DMAs back,
 * then allocate the reply-frame pool and prime the reply FIFO with
 * the bus address of every frame.
 *
 * Returns 0 on success or a negative error.
 */
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	/* Claim an inbound message frame from the post FIFO */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* 4-byte DMA buffer the IOP writes its init status into */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		/* Return the claimed frame via a NOP so it is not lost */
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);
	writel(4096, &msg[4]);
	/* reply frame size (in words) in the high half, init code 0x80 */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);
	/* single simple SG element (end-of-buffer) pointing at 'status' */
	writel(0xD0000004, &msg[6]);
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	/* Poll until the IOP reports something other than "in progress"
	 * (0x01) in the status byte */
	do {
		if (*status) {
			if (*status != 0x01 ) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* The 4-byte 'status' buffer is deliberately NOT
			 * freed here: its DMA address was handed to the IOP
			 * above, and a late-waking controller could still
			 * write into it.  Leaking 4 bytes is safer than a
			 * use-after-free of coherent memory. */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	/* 0x04 is the "outbound init complete" status */
	if(*status != 0x04 ) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	/* Re-initialization: release any previous reply pool first */
	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}

	/* Hand every reply frame's bus address to the reply FIFO */
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
/*
 * Issue I2O_CMD_STATUS_GET and wait for the IOP to DMA its status
 * block into pHba->status_block.  On success, also derives the post
 * and reply FIFO sizes and the scatter-gather table size the driver
 * will use from the returned values.
 *
 * Returns 0 on success or a negative errno.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;

	/* The status block is allocated once and reused on later calls */
	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n",
				pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	/* Claim an inbound message frame */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);


	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* Build the STATUS_GET request; msg[6..7] carry the 64-bit bus
	 * address of the status block, msg[8] its length */
	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel( dma_low(pHba->status_block_pa), &msg[6]);
	writel( dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]);


	writel(m, pHba->post_port);
	wmb();

	/* The block was zeroed above; the IOP writes 0xff into the final
	 * byte once the whole block has been DMAed back, so poll that */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	/* Clamp the advertised FIFO depths to what the driver supports */
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	/* How many SG elements fit in an inbound frame.  The 64-bit case
	 * reserves a larger message header (14 vs 12 words) and each SG
	 * element carries one extra u32 for the high address bits. */
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
				- 14 * sizeof(u32))
			/ (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
				- 12 * sizeof(u32))
			/ sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
2975
2976
2977
2978
2979static int adpt_i2o_lct_get(adpt_hba* pHba)
2980{
2981 u32 msg[8];
2982 int ret;
2983 u32 buf[16];
2984
2985 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2986 pHba->lct_size = pHba->status_block->expected_lct_size;
2987 }
2988 do {
2989 if (pHba->lct == NULL) {
2990 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
2991 pHba->lct_size, &pHba->lct_pa,
2992 GFP_ATOMIC);
2993 if(pHba->lct == NULL) {
2994 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2995 pHba->name);
2996 return -ENOMEM;
2997 }
2998 }
2999 memset(pHba->lct, 0, pHba->lct_size);
3000
3001 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3002 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3003 msg[2] = 0;
3004 msg[3] = 0;
3005 msg[4] = 0xFFFFFFFF;
3006 msg[5] = 0x00000000;
3007 msg[6] = 0xD0000000|pHba->lct_size;
3008 msg[7] = (u32)pHba->lct_pa;
3009
3010 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3011 printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
3012 pHba->name, ret);
3013 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3014 return ret;
3015 }
3016
3017 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3018 pHba->lct_size = pHba->lct->table_size << 2;
3019 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3020 pHba->lct, pHba->lct_pa);
3021 pHba->lct = NULL;
3022 }
3023 } while (pHba->lct == NULL);
3024
3025 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3026
3027
3028
3029 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3030 pHba->FwDebugBufferSize = buf[1];
3031 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3032 pHba->FwDebugBufferSize);
3033 if (pHba->FwDebugBuffer_P) {
3034 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3035 FW_DEBUG_FLAGS_OFFSET;
3036 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3037 FW_DEBUG_BLED_OFFSET;
3038 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3039 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3040 FW_DEBUG_STR_LENGTH_OFFSET;
3041 pHba->FwDebugBuffer_P += buf[2];
3042 pHba->FwDebugFlags = 0;
3043 }
3044 }
3045
3046 return 0;
3047}
3048
/*
 * (Re)build the global I2O system table describing every detected
 * HBA on the hba_chain.  Any controller that fails a fresh status
 * read is dropped from the table.  The table is later pushed to each
 * IOP by adpt_i2o_systab_send().
 *
 * Returns 0 on success, -ENOMEM if the table cannot be allocated.
 */
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	/* Drop any previous table before sizing a new one */
	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +
		(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		/* Refresh the status block; skip (and uncount) dead HBAs */
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue;
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		/* iop_id = unit + 2: must match msg[4] in
		 * adpt_i2o_systab_send() */
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1;
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		/* Physical address of this IOP's inbound post port
		 * (base + 0x40) */
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}
3109
3110
3111
3112
3113
3114
/*
 * Print an identification line for one I2O device: TID plus vendor,
 * device and revision strings read from scalar group 0xF100 (fields
 * 3, 4 and 6).  Each query that fails simply omits its field.
 */
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];	/* large enough for the 16/8-byte fields + NUL */
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
	{
		/* query fills 16 bytes; terminate before printing */
		buf[16]=0;
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
	{
		buf[8]=0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if(d->lct_data.device_flags&(1<<0))
		printk("C");	/* configuration dialog requested */
	if(d->lct_data.device_flags&(1<<1))
		printk("U");
	if(!(d->lct_data.device_flags&(1<<4)))
		printk("P");
	if(!(d->lct_data.device_flags&(1<<5)))
		printk("M");
	printk("\n");
#endif
}
3153
3154#ifdef DEBUG
3155
3156
3157
3158static const char *adpt_i2o_get_class_name(int class)
3159{
3160 int idx = 16;
3161 static char *i2o_class_name[] = {
3162 "Executive",
3163 "Device Driver Module",
3164 "Block Device",
3165 "Tape Device",
3166 "LAN Interface",
3167 "WAN Interface",
3168 "Fibre Channel Port",
3169 "Fibre Channel Device",
3170 "SCSI Device",
3171 "ATE Port",
3172 "ATE Device",
3173 "Floppy Controller",
3174 "Floppy Device",
3175 "Secondary Bus Port",
3176 "Peer Transport Agent",
3177 "Peer Transport",
3178 "Unknown"
3179 };
3180
3181 switch(class&0xFFF) {
3182 case I2O_CLASS_EXECUTIVE:
3183 idx = 0; break;
3184 case I2O_CLASS_DDM:
3185 idx = 1; break;
3186 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3187 idx = 2; break;
3188 case I2O_CLASS_SEQUENTIAL_STORAGE:
3189 idx = 3; break;
3190 case I2O_CLASS_LAN:
3191 idx = 4; break;
3192 case I2O_CLASS_WAN:
3193 idx = 5; break;
3194 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3195 idx = 6; break;
3196 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3197 idx = 7; break;
3198 case I2O_CLASS_SCSI_PERIPHERAL:
3199 idx = 8; break;
3200 case I2O_CLASS_ATE_PORT:
3201 idx = 9; break;
3202 case I2O_CLASS_ATE_PERIPHERAL:
3203 idx = 10; break;
3204 case I2O_CLASS_FLOPPY_CONTROLLER:
3205 idx = 11; break;
3206 case I2O_CLASS_FLOPPY_DEVICE:
3207 idx = 12; break;
3208 case I2O_CLASS_BUS_ADAPTER_PORT:
3209 idx = 13; break;
3210 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3211 idx = 14; break;
3212 case I2O_CLASS_PEER_TRANSPORT:
3213 idx = 15; break;
3214 }
3215 return i2o_class_name[idx];
3216}
3217#endif
3218
3219
/*
 * Fetch the hardware resource table (HRT) from the IOP.  Like the
 * LCT read, this grows the DMA buffer and retries when the firmware
 * reports a table larger than the current buffer.
 *
 * Returns 0 on success or a negative errno / post-wait status.
 */
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);	/* simple SG, EOB */
		msg[5]= (u32)pHba->hrt_pa;

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		/* entry_len is in 32-bit words; if the full table did not
		 * fit, free (with the old size) and retry with the bigger
		 * buffer */
		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}
3257
3258
3259
3260
3261static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3262 int group, int field, void *buf, int buflen)
3263{
3264 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3265 u8 *opblk_va;
3266 dma_addr_t opblk_pa;
3267 u8 *resblk_va;
3268 dma_addr_t resblk_pa;
3269
3270 int size;
3271
3272
3273 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3274 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3275 if (resblk_va == NULL) {
3276 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3277 return -ENOMEM;
3278 }
3279
3280 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3281 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3282 if (opblk_va == NULL) {
3283 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3284 resblk_va, resblk_pa);
3285 printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3286 pHba->name);
3287 return -ENOMEM;
3288 }
3289 if (field == -1)
3290 opblk[4] = -1;
3291
3292 memcpy(opblk_va, opblk, sizeof(opblk));
3293 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3294 opblk_va, opblk_pa, sizeof(opblk),
3295 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3296 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3297 if (size == -ETIME) {
3298 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3299 resblk_va, resblk_pa);
3300 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3301 return -ETIME;
3302 } else if (size == -EINTR) {
3303 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3304 resblk_va, resblk_pa);
3305 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3306 return -EINTR;
3307 }
3308
3309 memcpy(buf, resblk_va+8, buflen);
3310
3311 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3312 resblk_va, resblk_pa);
3313 if (size < 0)
3314 return size;
3315
3316 return buflen;
3317}
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
/*
 * Post a UtilParamsGet/UtilParamsSet message carrying an operation
 * block (opblk) and a result block (resblk) and wait for completion.
 *
 * Returns a negative value on post-wait failure or when the reply's
 * BlockStatus byte reports an error (negated BlockStatus); otherwise
 * 4 + the reply's BlockSize converted from 32-bit words to bytes,
 * i.e. the number of valid bytes in resblk_va.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		void *opblk_va, dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	/* SG element 1: operation block (direction: to IOP) */
	msg[5] = 0x54000000 | oplen;
	msg[6] = (u32)opblk_pa;
	/* SG element 2: result block (direction: from IOP, EOB) */
	msg[7] = 0xD0000000 | reslen;
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;
	}

	/* res[1]: ErrorInfoSize(31:24) | BlockStatus(23:16) | BlockSize(15:0) */
	if (res[1]&0x00FF0000) {
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF);
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2);
}
3363
3364
/*
 * Ask the IOP to quiesce (stop servicing requests).  Only attempted
 * when the IOP is READY or OPERATIONAL; in any other state it is
 * treated as already quiesced and 0 is returned.  Refreshes the
 * status block afterwards so callers see the new state.
 */
static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	/* SysQuiesce discarded if IOP is not in READY or OPERATIONAL state */
	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
				pHba->unit, -ret);
	} else {
		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}
3394
3395
3396
3397
3398
/*
 * Enable the IOP so it can transition from READY to OPERATIONAL.
 * A no-op (returns 0) if it is already OPERATIONAL; fails with
 * -EINVAL from any state other than READY.  Refreshes the status
 * block afterwards so callers see the new state.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if(!pHba->status_block){
		return -ENOMEM;
	}

	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]= 0;
	msg[3]= 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}
3430
3431
/*
 * Push the global system table (built by adpt_i2o_build_sys_table())
 * to one IOP via I2O_CMD_SYS_TAB_SET.
 *
 * Returns 0 on success or the post-wait status on failure.
 */
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	/* segment number (0) | IOP_ID — must match the iop_id written
	 * into the table entry (unit + 2) */
	msg[4] = (0<<16) | ((pHba->unit+2) << 12);
	msg[5] = 0;

	/*
	 * Three SGL elements follow: the system table itself, plus
	 * empty private-memory and private-I/O space declarations
	 * (the last element carries the end-of-buffer flag).
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}
3468
3469
3470
3471
3472
3473
3474
3475
3476#ifdef UARTDELAY
3477
/*
 * Busy-wait for roughly 'millisec' milliseconds (1000us steps).
 * Only compiled for the UART debug path (UARTDELAY), where sleeping
 * is not an option.
 *
 * Fix: the declaration previously read "static static void" — a
 * duplicate storage-class specifier, which is a compile error.
 */
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}
3485
3486#endif
3487
/* SCSI midlayer host template: wires the adpt_* entry points into the
 * SCSI core.  can_queue is bounded by the inbound message FIFO depth. */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "dpt_i2o",
	.proc_name = "dpt_i2o",
	.show_info = adpt_show_info,
	.info = adpt_info,
	.queuecommand = adpt_queue,
	.eh_abort_handler = adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler = adpt_bus_reset,
	.eh_host_reset_handler = adpt_reset,
	.bios_param = adpt_bios_param,
	.slave_configure = adpt_slave_configure,
	.can_queue = MAX_TO_IOP_MESSAGES,	/* one per inbound frame */
	.this_id = 7,
};
3504
/*
 * Module entry point: detect all adapters (populating hba_chain),
 * then register each with the SCSI midlayer and scan it.
 */
static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	/* NOTE(review): this unwinds the entire chain, including hosts
	 * that were never successfully added (the failing one and all
	 * that follow it).  Calling scsi_remove_host() on an un-added
	 * host looks suspect — confirm against scsi_add_host()
	 * semantics. */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}
3532
3533static void __exit adpt_exit(void)
3534{
3535 adpt_hba *pHba, *next;
3536
3537 for (pHba = hba_chain; pHba; pHba = next) {
3538 next = pHba->next;
3539 adpt_release(pHba);
3540 }
3541}
3542
3543module_init(adpt_init);
3544module_exit(adpt_exit);
3545
3546MODULE_LICENSE("GPL");
3547