1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/module.h>
30#include <linux/pgtable.h>
31
32MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
33MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
34
35
36
37#include <linux/ioctl.h>
38#include <linux/uaccess.h>
39
40#include <linux/stat.h>
41#include <linux/slab.h>
42#include <linux/pci.h>
43#include <linux/proc_fs.h>
44#include <linux/blkdev.h>
45#include <linux/delay.h>
46#include <linux/interrupt.h>
47#include <linux/kernel.h>
48#include <linux/sched.h>
49#include <linux/reboot.h>
50#include <linux/spinlock.h>
51#include <linux/dma-mapping.h>
52
53#include <linux/timer.h>
54#include <linux/string.h>
55#include <linux/ioport.h>
56#include <linux/mutex.h>
57
58#include <asm/processor.h>
59#include <asm/io.h>
60
61#include <scsi/scsi.h>
62#include <scsi/scsi_cmnd.h>
63#include <scsi/scsi_device.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi_tcq.h>
66
67#include "dpt/dptsig.h"
68#include "dpti.h"
69
70
71
72
73
74
/* Serializes open/ioctl activity on the management character device. */
static DEFINE_MUTEX(adpt_mutex);

/*
 * DPT driver signature: identifies this driver (file type, OS, processor
 * family, version/date) to DPT/Adaptec management tools that scan for it.
 */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	/* unknown processor family */
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
93
94
95
96
97
98
99
100
101
/* Protects the global HBA chain (hba_chain / hba_count) and unit numbering. */
static DEFINE_MUTEX(adpt_configuration_lock);

/* I2O system table shared with the IOPs, kept in DMA-coherent memory. */
static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

/* Singly-linked chain of all discovered controllers. */
static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

/* Device class backing the /dev/dptiN control nodes. */
static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

/* File operations for the management/ioctl character device. */
static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/*
 * One node per in-flight message that a caller of adpt_i2o_post_wait()
 * is sleeping on; the reply path matches nodes by 'id'.
 */
struct adpt_i2o_post_wait_data
{
	int status;	/* completion status filled in by the reply path */
	u32 id;		/* 15-bit context matched against the reply */
	adpt_wait_queue_head_t *wq;	/* waiter to wake on completion */
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
/* Protects adpt_post_wait_queue and adpt_post_wait_id. */
static DEFINE_SPINLOCK(adpt_post_wait_lock);
143
144
145
146
147
148
149
150static inline int dpt_dma64(adpt_hba *pHba)
151{
152 return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
153}
154
155static inline u32 dma_high(dma_addr_t addr)
156{
157 return upper_32_bits(addr);
158}
159
160static inline u32 dma_low(dma_addr_t addr)
161{
162 return (u32)addr;
163}
164
165static u8 adpt_read_blink_led(adpt_hba* host)
166{
167 if (host->FwDebugBLEDflag_P) {
168 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
169 return readb(host->FwDebugBLEDvalue_P);
170 }
171 }
172 return 0;
173}
174
175
176
177
178
179
#ifdef MODULE
/* PCI device IDs handled by this driver (the two supported DPT boards). */
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

/* Expands to nothing when built in, so referencing dptids here is safe. */
MODULE_DEVICE_TABLE(pci,dptids);
189
/*
 * Probe for all DPT/Adaptec I2O controllers on the PCI bus, run the I2O
 * bring-up sequence (activate -> system table -> online -> LCT) on each,
 * and register the resulting SCSI hosts plus the control char device.
 * Returns the number of controllers successfully set up.
 */
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* Scan PCI for DPT vendor devices and install each supported one. */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			/* hold a reference for the lifetime of the HBA */
			pci_dev_get(pDev);
		}
	}

	/* Activate each controller; a failure drops it from the chain. */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}

	/* Re-entered whenever an HBA is deleted below and the shared
	 * system table must be rebuilt for the survivors. */
rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/* Without a system table nothing can work - shut everything down. */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* Bring each IOP to OPERATIONAL; failure forces a table rebuild. */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	PDEBUG("HBA's in OPERATIONAL state\n");

	/* Fetch and parse each controller's logical configuration table. */
	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	/* Class for the per-HBA control nodes; non-fatal if it fails. */
	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	/* Register a Scsi_Host and a /dev/dptiN node per controller. */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	/* One shared character device for all HBAs; fatal if unavailable. */
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
300
301
302static void adpt_release(adpt_hba *pHba)
303{
304 struct Scsi_Host *shost = pHba->host;
305
306 scsi_remove_host(shost);
307
308 adpt_i2o_delete_hba(pHba);
309 scsi_host_put(shost);
310}
311
312
/*
 * Send an INQUIRY to the adapter itself over the I2O private SCSI
 * channel and build the pHba->detail identification string from the
 * vendor/product/revision bytes returned.  Refreshes the IOP status
 * block before returning.
 */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;	/* standard INQUIRY data length */
	direction = 0x00000000;
	scsidir =0x40000000;	/* data in (device to host) */

	/* 64-bit DMA needs a longer frame for the extended SG element. */
	if (dpt_dma64(pHba))
		reqlen = 17;
	else
		reqlen = 14;

	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3]  = 0;
	/* Adaptec/DPT private SCSI_EXEC addressed to the adapter TID. */
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 ;
	/*
	 * SCSI flags word: direction plus CDB length of 6.
	 * NOTE(review): the 0x20a00000 flag bits come from the DPT
	 * firmware interface - confirm against the dpti headers.
	 */
	msg[6] = scsidir|0x20a00000| 6  ;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	/* 6-byte INQUIRY CDB, LUN 0, 36 bytes of data requested. */
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* allocation length */
	scb[5] = 0;

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;	/* remember where the SG length word goes */

	*lenptr = len;
	if (dpt_dma64(pHba)) {
		/* NOTE(review): 64-bit SG descriptor layout per DPT
		 * firmware (header, page size, flags|len, addr lo/hi) -
		 * confirm against the firmware interface docs. */
		*mptr++ = (0x7C<<24)+(2<<16)+0x02;
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		/* Single 32-bit SG element with end-of-list flags set. */
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	/* Post and wait up to 120 seconds for the inquiry to complete. */
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		/*
		 * On timeout/interrupt the IOP may still DMA into 'buf',
		 * so it cannot be freed safely - deliberate leak.
		 */
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		/* Compose "Vendor: ... Model: ... FW: ..." from the data. */
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
406
407
408static int adpt_slave_configure(struct scsi_device * device)
409{
410 struct Scsi_Host *host = device->host;
411 adpt_hba* pHba;
412
413 pHba = (adpt_hba *) host->hostdata[0];
414
415 if (host->can_queue && device->tagged_supported) {
416 scsi_change_queue_depth(device,
417 host->can_queue - 1);
418 }
419 return 0;
420}
421
/*
 * Queue one SCSI command to the HBA (called with the host lock held via
 * DEF_SCSI_QCMD).  Completes trivial cases inline, otherwise translates
 * the command into an I2O message via adpt_scsi_to_i2o().
 */
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;

	cmd->scsi_done = done;

	/*
	 * If sense data is already cached, answer a REQUEST_SENSE from it
	 * without going to the hardware.
	 */
	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/* Controller mid-reset: push the command back to the midlayer. */
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * First command for this scsi_device: resolve the adpt_device and
	 * cache it in scsi_device->hostdata for subsequent commands.
	 */
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			/* Address not in our device table. */
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/* Device is being reset (see adpt_device_reset): refuse for now. */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

/* Wraps adpt_queue_lck() with host-lock acquisition for the midlayer. */
static DEF_SCSI_QCMD(adpt_queue)
480
481static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
482 sector_t capacity, int geom[])
483{
484 int heads=-1;
485 int sectors=-1;
486 int cylinders=-1;
487
488
489
490
491 if (capacity < 0x2000 ) {
492 heads = 18;
493 sectors = 2;
494 }
495
496 else if (capacity < 0x20000) {
497 heads = 64;
498 sectors = 32;
499 }
500
501 else if (capacity < 0x40000) {
502 heads = 65;
503 sectors = 63;
504 }
505
506 else if (capacity < 0x80000) {
507 heads = 128;
508 sectors = 63;
509 }
510
511 else {
512 heads = 255;
513 sectors = 63;
514 }
515 cylinders = sector_div(capacity, heads * sectors);
516
517
518 if(sdev->type == 5) {
519 heads = 252;
520 sectors = 63;
521 cylinders = 1111;
522 }
523
524 geom[0] = heads;
525 geom[1] = sectors;
526 geom[2] = cylinders;
527
528 PDEBUG("adpt_bios_param: exit\n");
529 return 0;
530}
531
532
533static const char *adpt_info(struct Scsi_Host *host)
534{
535 adpt_hba* pHba;
536
537 pHba = (adpt_hba *) host->hostdata[0];
538 return (char *) (pHba->detail);
539}
540
/*
 * /proc/scsi show routine: print driver version, controller details and
 * the per-channel device table for the given host.
 */
static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	/* Find the HBA that owns this Scsi_Host. */
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	/* Walk every (channel, id) slot and its chain of LUNs. */
	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}
587
588
589
590
/*
 * Encode a reply-buffer pointer as the 32-bit initiator context carried
 * in an I2O message.  On 32-bit kernels the pointer fits directly; on
 * 64-bit kernels a free slot index in pHba->ioctl_reply_context[] is
 * handed out instead.  Returns (u32)-1 when every slot is in use.
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	/* host_lock protects the slot table against the reply path. */
	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}
617
618
619
620
/*
 * Inverse of adpt_ioctl_to_context(): recover the reply pointer and, on
 * 64-bit kernels, release the table slot.
 * NOTE(review): the 64-bit path does not range-check 'context'; callers
 * are presumed to pass back only values this driver generated - verify
 * at the call sites.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
632
633
634
635
636
637
/*
 * Error-handler abort: ask the firmware to abort one outstanding command
 * via an I2O SCSI_ABORT message addressed to the device's TID.
 * Returns SUCCESS or FAILED for the SCSI midlayer.
 */
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;
	struct adpt_device* dptdevice;
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	/* Identify the command by its block tag + 1 - presumably so 0
	 * stays reserved; confirm against the reply-context decoding. */
	msg[4] = cmd->request->tag + 1;
	/* adpt_i2o_post_wait() drops the host lock while sleeping. */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}
675
676
/* I2O command code used here for a per-device (unit) reset. */
#define I2O_DEVICE_RESET 0x27

/*
 * Error-handler device reset: send I2O_DEVICE_RESET to the device's TID.
 * DPTI_DEV_RESET is set on the device while waiting so adpt_queue_lck()
 * refuses new commands to it; the previous state is restored afterwards.
 */
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	/* adpt_i2o_post_wait() drops the host lock while sleeping. */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
721
722
/* I2O command code used here for a SCSI bus reset. */
#define I2O_HBA_BUS_RESET 0x87

/*
 * Error-handler bus reset: send I2O_HBA_BUS_RESET to the TID of the
 * channel (bus) the failing command belongs to.
 */
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	/* adpt_i2o_post_wait() drops the host lock while sleeping. */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
751
752
753static int __adpt_reset(struct scsi_cmnd* cmd)
754{
755 adpt_hba* pHba;
756 int rcode;
757 char name[32];
758
759 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
760 strncpy(name, pHba->name, sizeof(name));
761 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
762 rcode = adpt_hba_reset(pHba);
763 if(rcode == 0){
764 printk(KERN_WARNING"%s: HBA reset complete\n", name);
765 return SUCCESS;
766 } else {
767 printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
768 return FAILED;
769 }
770}
771
772static int adpt_reset(struct scsi_cmnd* cmd)
773{
774 int rc;
775
776 spin_lock_irq(cmd->device->host->host_lock);
777 rc = __adpt_reset(cmd);
778 spin_unlock_irq(cmd->device->host->host_lock);
779
780 return rc;
781}
782
783
/*
 * Re-initialize the controller from scratch: activate the IOP, rebuild
 * the system table, bring it online and reparse the LCT.  The
 * DPTI_STATE_RESET flag blocks the queuecommand path for the duration.
 * On any step's failure the HBA is deleted and an errno returned.
 */
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	/* Gate new commands (checked in adpt_queue_lck). */
	pHba->state |= DPTI_STATE_RESET;

	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	/* Fail back everything the midlayer still owns so it can retry. */
	scsi_host_complete_all_commands(pHba->host, DID_RESET);
	return 0;
}
823
824
825
826
827
828
829
830static void adpt_i2o_sys_shutdown(void)
831{
832 adpt_hba *pHba, *pNext;
833 struct adpt_i2o_post_wait_data *p1, *old;
834
835 printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
836 printk(KERN_INFO " This could take a few minutes if there are many devices attached\n");
837
838
839
840
841 for (pHba = hba_chain; pHba; pHba = pNext) {
842 pNext = pHba->next;
843 adpt_i2o_delete_hba(pHba);
844 }
845
846
847
848
849
850
851 for(p1 = adpt_post_wait_queue; p1;) {
852 old = p1;
853 p1 = p1->next;
854 kfree(old);
855 }
856
857 adpt_post_wait_queue = NULL;
858
859 printk(KERN_INFO "Adaptec I2O controllers down.\n");
860}
861
862static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
863{
864
865 adpt_hba* pHba = NULL;
866 adpt_hba* p = NULL;
867 ulong base_addr0_phys = 0;
868 ulong base_addr1_phys = 0;
869 u32 hba_map0_area_size = 0;
870 u32 hba_map1_area_size = 0;
871 void __iomem *base_addr_virt = NULL;
872 void __iomem *msg_addr_virt = NULL;
873 int dma64 = 0;
874
875 int raptorFlag = FALSE;
876
877 if(pci_enable_device(pDev)) {
878 return -EINVAL;
879 }
880
881 if (pci_request_regions(pDev, "dpt_i2o")) {
882 PERROR("dpti: adpt_config_hba: pci request region failed\n");
883 return -EINVAL;
884 }
885
886 pci_set_master(pDev);
887
888
889
890
891 if (sizeof(dma_addr_t) > 4 &&
892 dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
893 dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
894 dma64 = 1;
895
896 if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
897 return -EINVAL;
898
899
900 dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
901
902 base_addr0_phys = pci_resource_start(pDev,0);
903 hba_map0_area_size = pci_resource_len(pDev,0);
904
905
906 if(pDev->device == PCI_DPT_DEVICE_ID){
907 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
908
909 hba_map0_area_size = 0x400000;
910 } else {
911 if(hba_map0_area_size > 0x100000 ){
912 hba_map0_area_size = 0x100000;
913 }
914 }
915 } else {
916
917 base_addr1_phys = pci_resource_start(pDev,1);
918 hba_map1_area_size = pci_resource_len(pDev,1);
919 raptorFlag = TRUE;
920 }
921
922#if BITS_PER_LONG == 64
923
924
925
926
927
928
929
930 if (raptorFlag == TRUE) {
931 if (hba_map0_area_size > 128)
932 hba_map0_area_size = 128;
933 if (hba_map1_area_size > 524288)
934 hba_map1_area_size = 524288;
935 } else {
936 if (hba_map0_area_size > 524288)
937 hba_map0_area_size = 524288;
938 }
939#endif
940
941 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
942 if (!base_addr_virt) {
943 pci_release_regions(pDev);
944 PERROR("dpti: adpt_config_hba: io remap failed\n");
945 return -EINVAL;
946 }
947
948 if(raptorFlag == TRUE) {
949 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
950 if (!msg_addr_virt) {
951 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
952 iounmap(base_addr_virt);
953 pci_release_regions(pDev);
954 return -EINVAL;
955 }
956 } else {
957 msg_addr_virt = base_addr_virt;
958 }
959
960
961 pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
962 if (!pHba) {
963 if (msg_addr_virt != base_addr_virt)
964 iounmap(msg_addr_virt);
965 iounmap(base_addr_virt);
966 pci_release_regions(pDev);
967 return -ENOMEM;
968 }
969
970 mutex_lock(&adpt_configuration_lock);
971
972 if(hba_chain != NULL){
973 for(p = hba_chain; p->next; p = p->next);
974 p->next = pHba;
975 } else {
976 hba_chain = pHba;
977 }
978 pHba->next = NULL;
979 pHba->unit = hba_count;
980 sprintf(pHba->name, "dpti%d", hba_count);
981 hba_count++;
982
983 mutex_unlock(&adpt_configuration_lock);
984
985 pHba->pDev = pDev;
986 pHba->base_addr_phys = base_addr0_phys;
987
988
989 pHba->base_addr_virt = base_addr_virt;
990 pHba->msg_addr_virt = msg_addr_virt;
991 pHba->irq_mask = base_addr_virt+0x30;
992 pHba->post_port = base_addr_virt+0x40;
993 pHba->reply_port = base_addr_virt+0x44;
994
995 pHba->hrt = NULL;
996 pHba->lct = NULL;
997 pHba->lct_size = 0;
998 pHba->status_block = NULL;
999 pHba->post_count = 0;
1000 pHba->state = DPTI_STATE_RESET;
1001 pHba->pDev = pDev;
1002 pHba->devices = NULL;
1003 pHba->dma64 = dma64;
1004
1005
1006 spin_lock_init(&pHba->state_lock);
1007 spin_lock_init(&adpt_post_wait_lock);
1008
1009 if(raptorFlag == 0){
1010 printk(KERN_INFO "Adaptec I2O RAID controller"
1011 " %d at %p size=%x irq=%d%s\n",
1012 hba_count-1, base_addr_virt,
1013 hba_map0_area_size, pDev->irq,
1014 dma64 ? " (64-bit DMA)" : "");
1015 } else {
1016 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1017 hba_count-1, pDev->irq,
1018 dma64 ? " (64-bit DMA)" : "");
1019 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1020 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1021 }
1022
1023 if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1024 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1025 adpt_i2o_delete_hba(pHba);
1026 return -EINVAL;
1027 }
1028
1029 return 0;
1030}
1031
1032
/*
 * Tear down one controller: unhook its IRQ, unlink it from the global
 * chain, release all mapped/DMA/device resources and free the adpt_hba.
 * When the last HBA goes away the character device and sysfs class are
 * unregistered as well.
 */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;

	mutex_lock(&adpt_configuration_lock);
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	/* Unlink pHba from the singly-linked hba_chain. */
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	/* Unmap BARs (BAR1 only if it was mapped separately). */
	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	/* Free the DMA-coherent I2O structures (HRT, LCT, status, replies). */
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	/* Free the i2o_device list and the per-channel adpt_device chains. */
	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	/* Last controller gone: remove the control-node plumbing too. */
	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}
1118
1119static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
1120{
1121 struct adpt_device* d;
1122
1123 if (chan >= MAX_CHANNEL)
1124 return NULL;
1125
1126 d = pHba->channel[chan].device[id];
1127 if(!d || d->tid == 0) {
1128 return NULL;
1129 }
1130
1131
1132 if(d->scsi_lun == lun){
1133 return d;
1134 }
1135
1136
1137 for(d=d->next_lun ; d ; d = d->next_lun){
1138 if(d->scsi_lun == lun){
1139 return d;
1140 }
1141 }
1142 return NULL;
1143}
1144
1145
/*
 * Post an I2O message and sleep until its reply arrives or 'timeout'
 * seconds pass (0 means wait forever).  A 15-bit wait id is stamped into
 * msg[2] (with bit 31 set) so adpt_i2o_post_wait_complete() can match
 * the reply to this waiter.  The host lock, if held, is dropped while
 * asleep.  Returns 0 on success, -EOPNOTSUPP when the IOP reports the
 * function unsupported, -ETIME/-ETIMEDOUT on timeout, or -ENOMEM.
 */
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * Queue the wait node and hand out the next id.  The id wraps at
	 * 0x7fff, matching the mask applied on the completion side.
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);

	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	/* Bit 31 marks this context as a post-wait transaction. */
	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				/* Timer expired before the reply came in. */
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		/*
		 * wait_data is left on adpt_post_wait_queue: a late reply
		 * may still write its status.  The node is freed later by
		 * adpt_i2o_sys_shutdown().
		 */
		return status;
	}

	/* Unlink and free our wait node, translating the status if needed. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
1231
1232
/*
 * Claim a free message frame from the post FIFO (waiting up to 30s),
 * copy 'data' (len bytes) into it and post the frame back to the IOP.
 * Returns 0 on success or -ETIMEDOUT if no frame became available.
 */
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	/* Poll the post port until the IOP offers a frame offset. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	/* 'm' is an offset into the message window; fill the frame. */
	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	/* Hand the frame back to the IOP to execute it. */
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
1262
1263
/*
 * Reply path for adpt_i2o_post_wait(): match the low 15 bits of the
 * message context against the queued wait nodes, record the status and
 * wake the sleeper.  Holding adpt_post_wait_lock during the walk keeps
 * a timing-out waiter from freeing its node under us.
 */
static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);

	/*
	 * No waiter matched: dump what is pending to aid debugging.
	 * NOTE(review): this debug walk runs without the lock - racy,
	 * but it is a diagnostic-only path.
	 */
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG" Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG" %d\n",p1->id);
	}
	return;
}
1300
/*
 * Issue an I2O ADAPTER_RESET (IOP reset) and poll the 4-byte status
 * buffer the IOP writes back.  The controller is quiesced first unless
 * it has never been initialized (first reset gets a shorter timeout).
 * Returns 0 on (apparent) success or a negative errno.
 */
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized  == FALSE) {	/* first reset should be quick */
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	/* Claim a free message frame from the post FIFO. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	/* DMA buffer the IOP writes its reset status byte into. */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);	/* give the claimed frame back */
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	/* Poll until the IOP writes a non-zero status byte. */
	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/*
			 * NOTE(review): 'status' is deliberately not freed
			 * here - the IOP may still DMA into it later.  The
			 * 4 bytes are lost; matches historical behavior.
			 */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	/* Status 0x01: reset accepted and in progress (see PDEBUG below).
	 * Wait for the IOP to come back, then return its offered frame
	 * with a NOP so the FIFO stays balanced. */
	if(*status == 0x01 ) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* Same deliberate 'status' leak as above. */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);

		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	/* Status 0x02: the IOP rejected the reset request. */
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	/* Allow time for the on-board debug UART to flush. */
	adpt_delay(20000);
#endif
	return 0;
}
1403
1404
1405static int adpt_i2o_parse_lct(adpt_hba* pHba)
1406{
1407 int i;
1408 int max;
1409 int tid;
1410 struct i2o_device *d;
1411 i2o_lct *lct = pHba->lct;
1412 u8 bus_no = 0;
1413 s16 scsi_id;
1414 u64 scsi_lun;
1415 u32 buf[10];
1416 struct adpt_device* pDev;
1417
1418 if (lct == NULL) {
1419 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1420 return -1;
1421 }
1422
1423 max = lct->table_size;
1424 max -= 3;
1425 max /= 9;
1426
1427 for(i=0;i<max;i++) {
1428 if( lct->lct_entry[i].user_tid != 0xfff){
1429
1430
1431
1432
1433
1434
1435
1436 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1437 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1438 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1439 continue;
1440 }
1441 tid = lct->lct_entry[i].tid;
1442
1443 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1444 continue;
1445 }
1446 bus_no = buf[0]>>16;
1447 scsi_id = buf[1];
1448 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1449 if(bus_no >= MAX_CHANNEL) {
1450 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1451 continue;
1452 }
1453 if (scsi_id >= MAX_ID){
1454 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1455 continue;
1456 }
1457 if(bus_no > pHba->top_scsi_channel){
1458 pHba->top_scsi_channel = bus_no;
1459 }
1460 if(scsi_id > pHba->top_scsi_id){
1461 pHba->top_scsi_id = scsi_id;
1462 }
1463 if(scsi_lun > pHba->top_scsi_lun){
1464 pHba->top_scsi_lun = scsi_lun;
1465 }
1466 continue;
1467 }
1468 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1469 if(d==NULL)
1470 {
1471 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1472 return -ENOMEM;
1473 }
1474
1475 d->controller = pHba;
1476 d->next = NULL;
1477
1478 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1479
1480 d->flags = 0;
1481 tid = d->lct_data.tid;
1482 adpt_i2o_report_hba_unit(pHba, d);
1483 adpt_i2o_install_device(pHba, d);
1484 }
1485 bus_no = 0;
1486 for(d = pHba->devices; d ; d = d->next) {
1487 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1488 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1489 tid = d->lct_data.tid;
1490
1491
1492 if(bus_no > pHba->top_scsi_channel){
1493 pHba->top_scsi_channel = bus_no;
1494 }
1495 pHba->channel[bus_no].type = d->lct_data.class_id;
1496 pHba->channel[bus_no].tid = tid;
1497 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1498 {
1499 pHba->channel[bus_no].scsi_id = buf[1];
1500 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1501 }
1502
1503 bus_no++;
1504 if(bus_no >= MAX_CHANNEL) {
1505 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1506 break;
1507 }
1508 }
1509 }
1510
1511
1512 for(d = pHba->devices; d ; d = d->next) {
1513 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1514 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1515 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1516
1517 tid = d->lct_data.tid;
1518 scsi_id = -1;
1519
1520 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1521 bus_no = buf[0]>>16;
1522 scsi_id = buf[1];
1523 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1524 if(bus_no >= MAX_CHANNEL) {
1525 continue;
1526 }
1527 if (scsi_id >= MAX_ID) {
1528 continue;
1529 }
1530 if( pHba->channel[bus_no].device[scsi_id] == NULL){
1531 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1532 if(pDev == NULL) {
1533 return -ENOMEM;
1534 }
1535 pHba->channel[bus_no].device[scsi_id] = pDev;
1536 } else {
1537 for( pDev = pHba->channel[bus_no].device[scsi_id];
1538 pDev->next_lun; pDev = pDev->next_lun){
1539 }
1540 pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1541 if(pDev->next_lun == NULL) {
1542 return -ENOMEM;
1543 }
1544 pDev = pDev->next_lun;
1545 }
1546 pDev->tid = tid;
1547 pDev->scsi_channel = bus_no;
1548 pDev->scsi_id = scsi_id;
1549 pDev->scsi_lun = scsi_lun;
1550 pDev->pI2o_dev = d;
1551 d->owner = pDev;
1552 pDev->type = (buf[0])&0xff;
1553 pDev->flags = (buf[0]>>8)&0xff;
1554 if(scsi_id > pHba->top_scsi_id){
1555 pHba->top_scsi_id = scsi_id;
1556 }
1557 if(scsi_lun > pHba->top_scsi_lun){
1558 pHba->top_scsi_lun = scsi_lun;
1559 }
1560 }
1561 if(scsi_id == -1){
1562 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1563 d->lct_data.identity_tag);
1564 }
1565 }
1566 }
1567 return 0;
1568}
1569
1570
1571
1572
1573
1574
1575
1576static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1577{
1578 mutex_lock(&adpt_configuration_lock);
1579 d->controller=pHba;
1580 d->owner=NULL;
1581 d->next=pHba->devices;
1582 d->prev=NULL;
1583 if (pHba->devices != NULL){
1584 pHba->devices->prev=d;
1585 }
1586 pHba->devices=d;
1587 *d->dev_name = 0;
1588
1589 mutex_unlock(&adpt_configuration_lock);
1590 return 0;
1591}
1592
1593static int adpt_open(struct inode *inode, struct file *file)
1594{
1595 int minor;
1596 adpt_hba* pHba;
1597
1598 mutex_lock(&adpt_mutex);
1599
1600
1601 minor = iminor(inode);
1602 if (minor >= hba_count) {
1603 mutex_unlock(&adpt_mutex);
1604 return -ENXIO;
1605 }
1606 mutex_lock(&adpt_configuration_lock);
1607 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1608 if (pHba->unit == minor) {
1609 break;
1610 }
1611 }
1612 if (pHba == NULL) {
1613 mutex_unlock(&adpt_configuration_lock);
1614 mutex_unlock(&adpt_mutex);
1615 return -ENXIO;
1616 }
1617
1618
1619
1620
1621
1622
1623 pHba->in_use = 1;
1624 mutex_unlock(&adpt_configuration_lock);
1625 mutex_unlock(&adpt_mutex);
1626
1627 return 0;
1628}
1629
1630static int adpt_close(struct inode *inode, struct file *file)
1631{
1632 int minor;
1633 adpt_hba* pHba;
1634
1635 minor = iminor(inode);
1636 if (minor >= hba_count) {
1637 return -ENXIO;
1638 }
1639 mutex_lock(&adpt_configuration_lock);
1640 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1641 if (pHba->unit == minor) {
1642 break;
1643 }
1644 }
1645 mutex_unlock(&adpt_configuration_lock);
1646 if (pHba == NULL) {
1647 return -ENXIO;
1648 }
1649
1650 pHba->in_use = 0;
1651
1652 return 0;
1653}
1654
1655
1656static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1657{
1658 u32 msg[MAX_MESSAGE_SIZE];
1659 u32* reply = NULL;
1660 u32 size = 0;
1661 u32 reply_size = 0;
1662 u32 __user *user_msg = arg;
1663 u32 __user * user_reply = NULL;
1664 void **sg_list = NULL;
1665 u32 sg_offset = 0;
1666 u32 sg_count = 0;
1667 int sg_index = 0;
1668 u32 i = 0;
1669 u32 rcode = 0;
1670 void *p = NULL;
1671 dma_addr_t addr;
1672 ulong flags = 0;
1673
1674 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1675
1676 if(get_user(size, &user_msg[0])){
1677 return -EFAULT;
1678 }
1679 size = size>>16;
1680
1681 user_reply = &user_msg[size];
1682 if(size > MAX_MESSAGE_SIZE){
1683 return -EFAULT;
1684 }
1685 size *= 4;
1686
1687
1688 if(copy_from_user(msg, user_msg, size)) {
1689 return -EFAULT;
1690 }
1691 get_user(reply_size, &user_reply[0]);
1692 reply_size = reply_size>>16;
1693 if(reply_size > REPLY_FRAME_SIZE){
1694 reply_size = REPLY_FRAME_SIZE;
1695 }
1696 reply_size *= 4;
1697 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1698 if(reply == NULL) {
1699 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1700 return -ENOMEM;
1701 }
1702 sg_offset = (msg[0]>>4)&0xf;
1703 msg[2] = 0x40000000;
1704 msg[3] = adpt_ioctl_to_context(pHba, reply);
1705 if (msg[3] == (u32)-1) {
1706 rcode = -EBUSY;
1707 goto free;
1708 }
1709
1710 sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
1711 if (!sg_list) {
1712 rcode = -ENOMEM;
1713 goto free;
1714 }
1715 if(sg_offset) {
1716
1717 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1718 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1719 if (sg_count > pHba->sg_tablesize){
1720 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1721 rcode = -EINVAL;
1722 goto free;
1723 }
1724
1725 for(i = 0; i < sg_count; i++) {
1726 int sg_size;
1727
1728 if (!(sg[i].flag_count & 0x10000000 )) {
1729 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1730 rcode = -EINVAL;
1731 goto cleanup;
1732 }
1733 sg_size = sg[i].flag_count & 0xffffff;
1734
1735 p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1736 if(!p) {
1737 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1738 pHba->name,sg_size,i,sg_count);
1739 rcode = -ENOMEM;
1740 goto cleanup;
1741 }
1742 sg_list[sg_index++] = p;
1743
1744 if(sg[i].flag_count & 0x04000000 ) {
1745
1746 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1747 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1748 rcode = -EFAULT;
1749 goto cleanup;
1750 }
1751 }
1752
1753 sg[i].addr_bus = addr;
1754 }
1755 }
1756
1757 do {
1758
1759
1760
1761
1762 if (pHba->host) {
1763 scsi_block_requests(pHba->host);
1764 spin_lock_irqsave(pHba->host->host_lock, flags);
1765 }
1766 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1767 if (rcode != 0)
1768 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1769 rcode, reply);
1770 if (pHba->host) {
1771 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1772 scsi_unblock_requests(pHba->host);
1773 }
1774 } while (rcode == -ETIMEDOUT);
1775
1776 if(rcode){
1777 goto cleanup;
1778 }
1779
1780 if(sg_offset) {
1781
1782 u32 j;
1783
1784 struct sg_simple_element* sg;
1785 int sg_size;
1786
1787
1788 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1789
1790 if(get_user(size, &user_msg[0])){
1791 rcode = -EFAULT;
1792 goto cleanup;
1793 }
1794 size = size>>16;
1795 size *= 4;
1796 if (size > MAX_MESSAGE_SIZE) {
1797 rcode = -EINVAL;
1798 goto cleanup;
1799 }
1800
1801 if (copy_from_user (msg, user_msg, size)) {
1802 rcode = -EFAULT;
1803 goto cleanup;
1804 }
1805 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1806
1807
1808 sg = (struct sg_simple_element*)(msg + sg_offset);
1809 for (j = 0; j < sg_count; j++) {
1810
1811 if(! (sg[j].flag_count & 0x4000000 )) {
1812 sg_size = sg[j].flag_count & 0xffffff;
1813
1814 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1815 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1816 rcode = -EFAULT;
1817 goto cleanup;
1818 }
1819 }
1820 }
1821 }
1822
1823
1824 if (reply_size) {
1825
1826 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1827 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1828 rcode = -EFAULT;
1829 }
1830 if(copy_to_user(user_reply, reply, reply_size)) {
1831 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1832 rcode = -EFAULT;
1833 }
1834 }
1835
1836
1837cleanup:
1838 if (rcode != -ETIME && rcode != -EINTR) {
1839 struct sg_simple_element *sg =
1840 (struct sg_simple_element*) (msg +sg_offset);
1841 while(sg_index) {
1842 if(sg_list[--sg_index]) {
1843 dma_free_coherent(&pHba->pDev->dev,
1844 sg[sg_index].flag_count & 0xffffff,
1845 sg_list[sg_index],
1846 sg[sg_index].addr_bus);
1847 }
1848 }
1849 }
1850
1851free:
1852 kfree(sg_list);
1853 kfree(reply);
1854 return rcode;
1855}
1856
1857#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	/*
	 * This is all the info we need for now.
	 * Later you may want to add the processor family.
	 */
	si->processorType = PROC_IA64;
}
1865#endif
1866
1867#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	/*
	 * This is all the info we need for now.
	 * Later you may want to add the processor family.
	 */
	si->processorType = PROC_ULTRASPARC;
}
1875#endif
1876#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	/*
	 * This is all the info we need for now.
	 * Later you may want to add the processor family.
	 */
	si->processorType = PROC_ALPHA;
}
1884#endif
1885
1886#if defined __i386__
1887
1888#include <uapi/asm/vm86.h>
1889
static void adpt_i386_info(sysInfo_S* si)
{
	/*
	 * Map the boot CPU family reported by the kernel onto the DPT
	 * sysInfo processor-type codes.  Anything newer than a 586 is
	 * reported as a Pentium.
	 */
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:  /* Just in case */
		si->processorType = PROC_PENTIUM;
		break;
	}
}
1910#endif
1911
1912
1913
1914
1915
1916
1917
/*
 * Fill a sysInfo_S record describing the host OS and processor and copy
 * it to the user buffer (DPT_SYSINFO ioctl).  Returns 0 or -EFAULT.
 */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

	/* Per-architecture processor type; 0xff when unrecognized. */
#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
1949
/*
 * Management ioctl dispatcher for the dpti character device.  The minor
 * number selects the HBA; the command selects signature/info queries,
 * raw I2O passthrough, LED readout, reset and rescan operations.
 * Callers serialize via adpt_mutex (see adpt_unlocked_ioctl).
 */
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	/* Look up the HBA for this minor under the configuration lock. */
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found our board */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	/* Busy-wait (sleeping) until any in-progress controller reset is
	 * finished.  NOTE(review): the volatile cast stands in for proper
	 * READ_ONCE()-style access; state is changed from other contexts. */
	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	/* Return the driver signature block. */
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		/* Raw I2O message passthrough. */
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn); 
		HbaInfo.Interrupt = pHba->pDev->irq; 
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD: {
		struct Scsi_Host *shost = pHba->host;

		/* NOTE(review): adpt_hba_reset() is invoked with the host
		 * lock held and interrupts off -- confirm it never sleeps
		 * on this path. */
		if (shost)
			spin_lock_irqsave(shost->host_lock, flags);
		adpt_hba_reset(pHba);
		if (shost)
			spin_unlock_irqrestore(shost->host_lock, flags);
		break;
	}
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
2035
2036static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2037{
2038 struct inode *inode;
2039 long ret;
2040
2041 inode = file_inode(file);
2042
2043 mutex_lock(&adpt_mutex);
2044 ret = adpt_ioctl(inode, file, cmd, arg);
2045 mutex_unlock(&adpt_mutex);
2046
2047 return ret;
2048}
2049
2050#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point: whitelist the commands whose argument
 * layout is identical in 32- and 64-bit mode and forward them to
 * adpt_ioctl() under the driver mutex; everything else is rejected.
 */
static long compat_adpt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch(cmd) {
		case DPT_SIGNATURE:
		case I2OUSRCMD:
		case DPT_CTRLINFO:
		case DPT_SYSINFO:
		case DPT_BLINKLED:
		case I2ORESETCMD:
		case I2ORESCANCMD:
		case (DPT_TARGET_BUSY & 0xFFFF):
		case DPT_TARGET_BUSY:
			ret = adpt_ioctl(inode, file, cmd, arg);
			break;
		default:
			ret =  -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
2081#endif
2082
/*
 * Interrupt handler: drain the reply FIFO while the pending bit is set.
 * Each reply frame is either an ioctl-passthrough completion (context bit
 * 0x40000000), a post-wait completion (bit 0x80000000), or a normal SCSI
 * command completion identified by its tag.
 */
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){ 
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		/* Translate the reply MFA to a virtual address; frames should
		 * always come from our coherent reply pool. */
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			/* The IOP failed the original message; recover its
			 * transaction context from the preserved MFA and
			 * return the frame with a NOP. */
			u32 old_m = readl(reply+28); 
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		} 
		context = readl(reply+8);
		if(context & 0x40000000){ // IOCTL
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status  >> 24){
				status &=  0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				/*
				 * The request tag is one less than the command tag
				 * as the firmware might treat a 0 tag as invalid
				 */
				cmd = scsi_host_find_tag(pHba->host,
							 readl(reply + 12) - 1);
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			/*
			 * The request tag is one less than the command tag
			 * as the firmware might treat a 0 tag as invalid
			 */
			cmd = scsi_host_find_tag(pHba->host,
						 readl(reply + 12) - 1);
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				adpt_i2o_scsi_complete(reply, cmd);
			}
		}
		/* Return the frame to the FIFO */
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
2188
/*
 * Build an I2O_CMD_SCSI_EXEC private message for a SCSI command and post
 * it to the IOP.  Lays out: header, tag, CDB, length word, optional 64-bit
 * SG attributes, then one SG element per mapped segment.  Returns the
 * result of adpt_i2o_post_this() (0 on success).
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0 , sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;	
	
	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note:  Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	// SGL OUT
			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 	0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[3] = cmd->request->tag + 1;
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff 
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT | 
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block 
	memset(mptr, 0,  16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		/* 64-bit SGL attribute element precedes the SG list */
		reqlen = 16;		// SINGLE SGE
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;		// SINGLE SGE
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on it's way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
2310
2311
2312static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2313{
2314 struct Scsi_Host *host;
2315
2316 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2317 if (host == NULL) {
2318 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2319 return -1;
2320 }
2321 host->hostdata[0] = (unsigned long)pHba;
2322 pHba->host = host;
2323
2324 host->irq = pHba->pDev->irq;
2325
2326
2327
2328 host->io_port = 0;
2329 host->n_io_port = 0;
2330
2331 host->max_id = 16;
2332 host->max_lun = 256;
2333 host->max_channel = pHba->top_scsi_channel + 1;
2334 host->cmd_per_lun = 1;
2335 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2336 host->sg_tablesize = pHba->sg_tablesize;
2337 host->can_queue = pHba->post_fifo_size;
2338
2339 return 0;
2340}
2341
2342
/*
 * Translate an I2O SCSI reply frame into a Linux SCSI result and complete
 * the command.  The detailed status word carries the device status in the
 * low byte and the HBA status above it; sense data, when present, starts
 * at offset 28 of the reply frame.
 */
static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits 
	// I know this would look cleaner if I just read bytes
	// but the model I have been using for all the rest of the
	// io is in 4 byte words - so I keep that model
	u16 detailed_status = readl(reply+16) &0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	// calculate resid for sg 
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if(!(reply_flags & MSG_FAIL)) {
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			// handle underflow
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			       hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		// copy over the request sense data if it was a check
		// condition status
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
			// Copy over the sense data
			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && 
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);

			}
		}
	} else {
		/* In this condition we could not talk to the tid
		 * the card rejected it.  We should signal a retry
		 * for a limitted number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
}
2462
2463
2464static s32 adpt_rescan(adpt_hba* pHba)
2465{
2466 s32 rcode;
2467 ulong flags = 0;
2468
2469 if(pHba->host)
2470 spin_lock_irqsave(pHba->host->host_lock, flags);
2471 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2472 goto out;
2473 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2474 goto out;
2475 rcode = 0;
2476out: if(pHba->host)
2477 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2478 return rcode;
2479}
2480
2481
/*
 * Re-walk a freshly fetched LCT after a rescan.  Existing devices are first
 * marked DPTI_DEV_UNSCANNED; every addressable device found in the new LCT
 * is either matched to an existing adpt_device (brought back online,
 * tid refreshed) or created from scratch.  Devices not seen again are set
 * offline.  Uses GFP_ATOMIC because the caller holds the host lock.
 * Returns 0 on success, -1 if no LCT, or -ENOMEM.
 */
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;
	
	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}
	
	max = lct->table_size;	
	max -= 3;	/* LCT header is 3 dwords ... */
	max /= 9;	/* ... and each entry is 9 dwords */

	// Mark each drive as unscanned; cleared again for devices we re-find
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
	
	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/* claimed by the IOP itself - not host visible */
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			/* group 0x8000 holds the device's bus/id/lun address */
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* da lun */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				/* Unknown device: create i2o_device and
				 * adpt_device records for it. */
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}
				
				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);
	
				pDev = pHba->channel[bus_no].device[scsi_id];	
				if( pDev == NULL){
					pDev =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* new LUN on an existing target:
					 * append to the next_lun chain */
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up it's mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	/* Anything still unscanned disappeared from the LCT: take it offline */
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
/*
 * Bring the IOP to the RESET state and prepare its queues.  A previously
 * initialized controller is first probed with a status query; one that is
 * faulted, or in any post-reset operational state, is reset again.  Then
 * the outbound queue is initialized and the Hardware Resource Table read.
 * Returns 0 on success, -1 or a negative code on failure.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		/* A controller we already set up once: make sure it still
		 * answers, resetting it if the status query fails. */
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		/* Insure that the device is not in HOLD or FAULTED state */
		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		/* Any state past RESET means a prior instance configured the
		 * IOP; reset it so we can configure it cleanly. */
		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);			
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		/* First activation: always start from a clean reset. */
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */
	
	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}
2709
2710
2711
2712
2713
2714static int adpt_i2o_online_hba(adpt_hba* pHba)
2715{
2716 if (adpt_i2o_systab_send(pHba) < 0)
2717 return -1;
2718
2719
2720 if (adpt_i2o_enable_hba(pHba) < 0)
2721 return -1;
2722
2723
2724 return 0;
2725}
2726
/*
 * Post a UTIL_NOP message so a claimed-but-unneeded inbound frame @m is
 * handed back to the IOP.  If @m is EMPTY_QUEUE, first wait up to 5
 * seconds for a frame to become available.
 * Returns 0 on success, 2 on timeout waiting for a frame.
 */
static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
	writel( 0,&msg[2]);
	wmb();

	/* Posting the frame offset returns ownership to the IOP. */
	writel(m, pHba->post_port);
	wmb();
	return 0;
}
2754
/*
 * Initialize the IOP's outbound (reply) queue: tell the adapter the
 * host frame size and page size, poll a DMA status byte until the init
 * completes, then allocate the reply frame pool and hand each frame's
 * bus address to the adapter.
 * Returns 0 on success, a negative errno, or -2 on a bad final status.
 */
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	/* Claim a free inbound message frame. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* One status byte the IOP DMAs back as the request progresses. */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		adpt_send_nop(pHba, m);	/* return the claimed frame */
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	/* Outbound msg frame size in words, and init code */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);
	writel(0xD0000004, &msg[6]);	/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	/* Poll the status byte until the init leaves the in-progress state. */
	do {
		if (*status) {
			if (*status != 0x01 /* in progress */) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* The 4-byte status buffer is deliberately NOT freed
			 * here: the controller may still wake up later and
			 * DMA into it, which would corrupt whatever the
			 * allocator reuses that memory for.  This is a known,
			 * accepted 4-byte leak on the timeout path —
			 * NOTE(review): confirm this rationale still holds. */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	/* Anything other than 0x04 (init complete) is a failure. */
	if(*status != 0x04 ) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	/* Replace any previous reply pool with a fresh allocation. */
	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

	/* Post every reply frame's bus address to the adapter. */
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
/*
 * Issue EXEC_STATUS_GET and wait for the IOP to DMA its status block
 * into pHba->status_block (allocated coherently on first use).  On
 * success, the cached block is used to size the inbound/outbound FIFOs
 * and the per-command scatter-gather table.
 * Returns 0 on success, -ENOMEM or -ETIMEDOUT on failure.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	/* Claim a free inbound message frame. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);


	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel( dma_low(pHba->status_block_pa), &msg[6]);
	writel( dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); /* size of the reply buffer */

	/* Post the message and wait for the DMA to land. */
	writel(m, pHba->post_port);
	wmb();

	/* The loop below treats byte 87 (the last byte of the block)
	 * becoming 0xff as the completion marker for the DMA. */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	/* Clamp the FIFO depths to what this driver supports. */
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	/* Work out how many SG elements fit in an inbound frame after the
	 * message header (the 64-bit DMA message header is larger). */
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
			- 14 * sizeof(u32))
			/ (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
			- 12 * sizeof(u32))
			/ sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}

	/* Debug dump of the reported IOP state. */
#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
2984
2985
2986
2987
2988static int adpt_i2o_lct_get(adpt_hba* pHba)
2989{
2990 u32 msg[8];
2991 int ret;
2992 u32 buf[16];
2993
2994 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2995 pHba->lct_size = pHba->status_block->expected_lct_size;
2996 }
2997 do {
2998 if (pHba->lct == NULL) {
2999 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3000 pHba->lct_size, &pHba->lct_pa,
3001 GFP_ATOMIC);
3002 if(pHba->lct == NULL) {
3003 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3004 pHba->name);
3005 return -ENOMEM;
3006 }
3007 }
3008 memset(pHba->lct, 0, pHba->lct_size);
3009
3010 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3011 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3012 msg[2] = 0;
3013 msg[3] = 0;
3014 msg[4] = 0xFFFFFFFF;
3015 msg[5] = 0x00000000;
3016 msg[6] = 0xD0000000|pHba->lct_size;
3017 msg[7] = (u32)pHba->lct_pa;
3018
3019 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3020 printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
3021 pHba->name, ret);
3022 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3023 return ret;
3024 }
3025
3026 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3027 pHba->lct_size = pHba->lct->table_size << 2;
3028 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3029 pHba->lct, pHba->lct_pa);
3030 pHba->lct = NULL;
3031 }
3032 } while (pHba->lct == NULL);
3033
3034 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3035
3036
3037
3038 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3039 pHba->FwDebugBufferSize = buf[1];
3040 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3041 pHba->FwDebugBufferSize);
3042 if (pHba->FwDebugBuffer_P) {
3043 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3044 FW_DEBUG_FLAGS_OFFSET;
3045 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3046 FW_DEBUG_BLED_OFFSET;
3047 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3048 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3049 FW_DEBUG_STR_LENGTH_OFFSET;
3050 pHba->FwDebugBuffer_P += buf[2];
3051 pHba->FwDebugFlags = 0;
3052 }
3053 }
3054
3055 return 0;
3056}
3057
/*
 * (Re)build the I2O system table describing every registered IOP into
 * the DMA-coherent globals sys_tbl/sys_tbl_pa; the table is later sent
 * to each adapter by adpt_i2o_systab_send().
 * Returns 0 on success, -ENOMEM on allocation failure.
 * NOTE(review): assumes hba_chain is non-NULL (hba_count > 0) since the
 * first HBA's device is used for the coherent alloc/free — confirm at
 * call sites.
 */
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	/* Drop any previously-built table before resizing. */
	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	/* header + */
				(hba_count) * sizeof(struct i2o_sys_tbl_entry); /* one entry per IOP */

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		/* Skip (and uncount) adapters whose status cannot be read. */
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue;
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1;
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		/* BAR offset 0x40 — presumably the inbound post FIFO;
		 * confirm against the register map used elsewhere. */
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}
3119
3120
3121
3122
3123
3124
/*
 * Print a one-line identification for one I2O device: its TID plus the
 * vendor / device / revision strings read from scalar parameter group
 * 0xF100 (fields 3, 4 and 6).  DEBUG builds also dump class, subclass
 * and device flags.
 */
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
	{
		buf[16]=0;	/* force NUL termination of the 16-byte field */
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
	{
		buf[8]=0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	/* One letter per device_flags bit — NOTE(review): bit meanings
	 * below are assumed from the I2O LCT spec; confirm. */
	if(d->lct_data.device_flags&(1<<0))
		printk("C");	/* assumed: configuration dialog requested */
	if(d->lct_data.device_flags&(1<<1))
		printk("U");	/* assumed: multi-user capable */
	if(!(d->lct_data.device_flags&(1<<4)))
		printk("P");	/* assumed: peer service enabled */
	if(!(d->lct_data.device_flags&(1<<5)))
		printk("M");	/* assumed: management service enabled */
	printk("\n");
#endif
}
3163
#ifdef DEBUG
/*
 * Map an I2O class code (lowest 12 bits) to a printable name.
 * Unrecognized codes map to "Unknown".
 */
static const char *adpt_i2o_get_class_name(int class)
{
	static const struct {
		int code;
		const char *name;
	} class_tbl[] = {
		{ I2O_CLASS_EXECUTIVE,			"Executive" },
		{ I2O_CLASS_DDM,			"Device Driver Module" },
		{ I2O_CLASS_RANDOM_BLOCK_STORAGE,	"Block Device" },
		{ I2O_CLASS_SEQUENTIAL_STORAGE,		"Tape Device" },
		{ I2O_CLASS_LAN,			"LAN Interface" },
		{ I2O_CLASS_WAN,			"WAN Interface" },
		{ I2O_CLASS_FIBRE_CHANNEL_PORT,		"Fibre Channel Port" },
		{ I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,	"Fibre Channel Device" },
		{ I2O_CLASS_SCSI_PERIPHERAL,		"SCSI Device" },
		{ I2O_CLASS_ATE_PORT,			"ATE Port" },
		{ I2O_CLASS_ATE_PERIPHERAL,		"ATE Device" },
		{ I2O_CLASS_FLOPPY_CONTROLLER,		"Floppy Controller" },
		{ I2O_CLASS_FLOPPY_DEVICE,		"Floppy Device" },
		{ I2O_CLASS_BUS_ADAPTER_PORT,		"Secondary Bus Port" },
		{ I2O_CLASS_PEER_TRANSPORT_AGENT,	"Peer Transport Agent" },
		{ I2O_CLASS_PEER_TRANSPORT,		"Peer Transport" },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(class_tbl); i++) {
		if ((class & 0xFFF) == class_tbl[i].code)
			return class_tbl[i].name;
	}
	return "Unknown";
}
#endif
3228
3229
/*
 * Fetch the Hardware Resource Table from the IOP, growing the
 * DMA-coherent buffer and retrying if the adapter reports a table
 * larger than the current allocation.
 * Returns 0 on success, -ENOMEM or the adpt_i2o_post_wait() status.
 */
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);	/* Simple SG LE, EOB */
		msg[5]= (u32)pHba->hrt_pa;	/* where the IOP dumps the HRT */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		/* Reported table bigger than our buffer: grow and retry.
		 * Note the free uses the old size, matching the allocation. */
		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}
3267
3268
3269
3270
3271static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3272 int group, int field, void *buf, int buflen)
3273{
3274 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3275 u8 *opblk_va;
3276 dma_addr_t opblk_pa;
3277 u8 *resblk_va;
3278 dma_addr_t resblk_pa;
3279
3280 int size;
3281
3282
3283 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3284 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3285 if (resblk_va == NULL) {
3286 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3287 return -ENOMEM;
3288 }
3289
3290 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3291 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3292 if (opblk_va == NULL) {
3293 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3294 resblk_va, resblk_pa);
3295 printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3296 pHba->name);
3297 return -ENOMEM;
3298 }
3299 if (field == -1)
3300 opblk[4] = -1;
3301
3302 memcpy(opblk_va, opblk, sizeof(opblk));
3303 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3304 opblk_va, opblk_pa, sizeof(opblk),
3305 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3306 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3307 if (size == -ETIME) {
3308 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3309 resblk_va, resblk_pa);
3310 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3311 return -ETIME;
3312 } else if (size == -EINTR) {
3313 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3314 resblk_va, resblk_pa);
3315 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3316 return -EINTR;
3317 }
3318
3319 memcpy(buf, resblk_va+8, buflen);
3320
3321 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3322 resblk_va, resblk_pa);
3323 if (size < 0)
3324 return size;
3325
3326 return buflen;
3327}
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
/*
 * Post a UTIL_PARAMS_GET/SET message carrying the operation block
 * @opblk and receive the reply in @resblk.  Both buffers must be
 * DMA-coherent; the caller supplies their bus addresses.
 * Returns the number of result bytes (4 plus the payload length taken
 * from the reply header), a negative BlockStatus on a parameter error,
 * or the adpt_i2o_post_wait() status on transport failure.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		void *opblk_va, dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock SG element */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock SG element */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
   		return wait_status; 	/* -DetailedStatus */
	}

	/* Non-zero BlockStatus in the reply header signals an error. */
	if (res[1]&0x00FF0000) {
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
}
3373
3374
3375static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3376{
3377 u32 msg[4];
3378 int ret;
3379
3380 adpt_i2o_status_get(pHba);
3381
3382
3383
3384 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3385 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3386 return 0;
3387 }
3388
3389 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3390 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3391 msg[2] = 0;
3392 msg[3] = 0;
3393
3394 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3395 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3396 pHba->unit, -ret);
3397 } else {
3398 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3399 }
3400
3401 adpt_i2o_status_get(pHba);
3402 return ret;
3403}
3404
3405
3406
3407
3408
3409static int adpt_i2o_enable_hba(adpt_hba* pHba)
3410{
3411 u32 msg[4];
3412 int ret;
3413
3414 adpt_i2o_status_get(pHba);
3415 if(!pHba->status_block){
3416 return -ENOMEM;
3417 }
3418
3419 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3420 return 0;
3421
3422 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3423 return -EINVAL;
3424
3425 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3426 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3427 msg[2]= 0;
3428 msg[3]= 0;
3429
3430 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3431 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3432 pHba->name, ret);
3433 } else {
3434 PDEBUG("%s: Enabled.\n", pHba->name);
3435 }
3436
3437 adpt_i2o_status_get(pHba);
3438 return ret;
3439}
3440
3441
/*
 * Send the globally-built system table (sys_tbl/sys_tbl_pa) to this
 * IOP via EXEC_SYS_TAB_SET.  The message also carries empty private
 * memory-space and I/O-space descriptors.
 * Returns the adpt_i2o_post_wait() status (0 on success).
 */
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 1 IOP ID (unit + 2) */
	msg[5] = 0;				   /* Segment 0 */

	/*
	 * Three SGL elements follow:
	 *  1) the system table itself (size and bus address),
	 *  2) an empty private memory-space descriptor,
	 *  3) an empty private I/O-space descriptor.
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}
3478
3479
3480
3481
3482
3483
3484
3485
#ifdef UARTDELAY

/*
 * Busy-wait for approximately @millisec milliseconds, one udelay(1000)
 * per loop iteration (used for UART debug pacing).
 * Fix: the function was declared "static static", a duplicate
 * storage-class specifier that fails to compile when UARTDELAY is set.
 */
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}

#endif
3497
/* SCSI host template: the entry points the SCSI mid-layer uses to drive
 * this adapter.  can_queue is bounded by the adapter's inbound message
 * frame limit (MAX_TO_IOP_MESSAGES); this_id 7 is the conventional host
 * SCSI ID. */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "dpt_i2o",
	.proc_name = "dpt_i2o",
	.show_info = adpt_show_info,
	.info = adpt_info,
	.queuecommand = adpt_queue,
	.eh_abort_handler = adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler = adpt_bus_reset,
	.eh_host_reset_handler = adpt_reset,
	.bios_param = adpt_bios_param,
	.slave_configure = adpt_slave_configure,
	.can_queue = MAX_TO_IOP_MESSAGES,
	.this_id = 7,
};
3514
/*
 * Module init: detect all DPT I2O adapters, then register each host
 * with the SCSI mid-layer and scan it for devices.  On a registration
 * failure the whole chain is unwound.
 * Returns 0 on success, -ENODEV when no adapter was found, or the
 * first error from detection/registration.
 */
static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	/* NOTE(review): this unwinds the entire chain, including the host
	 * whose scsi_add_host() just failed and any hosts after it that
	 * were never added — scsi_remove_host() on an un-added host looks
	 * suspect; confirm against the SCSI mid-layer contract. */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}
3542
3543static void __exit adpt_exit(void)
3544{
3545 adpt_hba *pHba, *next;
3546
3547 for (pHba = hba_chain; pHba; pHba = next) {
3548 next = pHba->next;
3549 adpt_release(pHba);
3550 }
3551}
3552
3553module_init(adpt_init);
3554module_exit(adpt_exit);
3555
3556MODULE_LICENSE("GPL");
3557