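/* dpt_i2o.c - Adaptec/DPT I2O RAID SCSI HBA driver */
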
#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

#include <linux/ioctl.h>
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

static DEFINE_MUTEX(adpt_mutex);

static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1), (-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);

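/*
 * Helpers for splitting a dma_addr_t into the 32-bit halves used in I2O
 * scatter-gather elements; dpt_dma64() reports whether this HBA was set
 * up for 64-bit DMA addressing.
 */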
static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if (readb(host->FwDebugBLEDflag_P) == 0xbc) {
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, dptids);
#endif

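/*
 * Scan the PCI bus for DPT/Adaptec I2O controllers, bring each one
 * through the I2O HOLD and OPERATIONAL states, read its LCT, and
 * register the SCSI hosts and the /dev/dpti<n> control nodes.
 */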
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	while ((pDev = pci_get_device(PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev)){
				PERROR("Could not initialize an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBAs in HOLD state\n");

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	PDEBUG("HBAs in OPERATIONAL state\n");

	printk(KERN_INFO "dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}

static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];

	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}

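/*
 * Issue a standard 36-byte SCSI INQUIRY to the adapter TID and build the
 * "Vendor/Model/FW" string in pHba->detail. On -ETIME/-EINTR the DMA
 * buffer is deliberately leaked, since the firmware may still write to it.
 */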
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;

	if (dpt_dma64(pHba))
		reqlen = 17;
	else
		reqlen = 14;

	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16;
	msg[6] = scsidir|0x20a00000| 6;

	mptr = msg+7;

	memset(scb, 0, sizeof(scb));
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;

	memcpy(mptr, scb, sizeof(scb));
	mptr += 4;
	lenptr = mptr++;

	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02;
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return;
}

static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}

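/*
 * Queue a SCSI command to the firmware. Commands arriving while the HBA
 * is resetting are pushed back to the midlayer with
 * SCSI_MLQUEUE_HOST_BUSY; REQUEST_SENSE is completed from the cached
 * sense data when it is non-zero.
 */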
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;

	cmd->scsi_done = done;

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	if (pDev->state & DPTI_DEV_RESET) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

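/*
 * Fabricate a BIOS-compatible CHS geometry from the capacity; for
 * example, a 0x30000-sector disk (< 0x40000) reports 65 heads and 63
 * sectors, and the cylinder count comes from sector_div(). CD-ROMs
 * (type 5) always report 252/63/1111.
 */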
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads = -1;
	int sectors = -1;
	int cylinders = -1;

	if (capacity < 0x2000) {
		heads = 18;
		sectors = 2;
	}
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	if(sdev->type == 5) {
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}

static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size, host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

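/*
 * Context helpers: on 32-bit builds a kernel pointer fits in the 32-bit
 * I2O transaction context, so it is used directly. On 64-bit builds,
 * SCSI commands are looked up by serial number and ioctl replies go
 * through the ioctl_reply_context[] table instead.
 */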
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree(reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}

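/*
 * Error-handler entry points. Each one builds a small I2O message
 * (abort, device reset, bus reset or full HBA reset) and waits for the
 * firmware to acknowledge it via adpt_i2o_post_wait().
 */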
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;
	struct adpt_device* dptdevice;
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}

#define I2O_DEVICE_RESET 0x27

static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}

#define I2O_HBA_BUS_RESET 0x87

static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

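/*
 * Re-initialize a wedged controller: flag DPTI_STATE_RESET so the queue
 * path backs off, walk the HBA back through HOLD and OPERATIONAL,
 * re-read the LCT, and fail any SCBs that were posted before the reset.
 */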
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	if ((rcode = adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode = adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode = adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;
}

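/* Tear down every registered HBA and drain the post-wait queue. */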
static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");

	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}

	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

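/*
 * One-time PCI setup for a controller: enable the device, pick a DMA
 * mask (64-bit only when the platform actually needs it, while coherent
 * allocations stay below 4GB for the firmware), map BAR0 (and BAR1 on
 * "Raptor" boards, where the message unit lives behind BAR1), then link
 * the new adpt_hba into hba_chain and hook up the shared interrupt.
 */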
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;
	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >= 0xc032 && pDev->subsystem_device <= 0xc03b){
			hba_map0_area_size = 0x400000;
		} else {
			if(hba_map0_area_size > 0x100000){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys, hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size);
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"   BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"   BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq(pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}

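/*
 * Undo adpt_install_hba(): unlink from hba_chain, release the IRQ,
 * mappings, PCI regions and coherent buffers, and free the per-channel
 * device lists. The last HBA out also drops the chrdev and sysfs class.
 */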
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;

	mutex_lock(&adpt_configuration_lock);

	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for(p1 = hba_chain; p1; p2 = p1, p1 = p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0; i < pHba->top_scsi_channel; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if (chan >= MAX_CHANNEL)
		return NULL;

	if(pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before the device table is allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	if(d->scsi_lun == lun){
		return d;
	}

	for(d = d->next_lun; d; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}

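/*
 * Synchronous message post: tag msg[2] with a 15-bit wait id, post the
 * frame, and sleep until adpt_i2o_post_wait_complete() fills in the
 * status or the timeout expires (timeout is given in seconds; 0 waits
 * forever). On timeout the wait_data node is intentionally left on the
 * queue, because the firmware may still complete it later.
 */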
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	spin_lock_irqsave(&adpt_post_wait_lock, flags);

	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		return status;
	}

	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}

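/*
 * Low-level post: claim a message frame offset from the post FIFO
 * (polling up to 30 seconds), copy the message into the frame with
 * memcpy_toio(), and write the offset back to the port to fire it.
 */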
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies, timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	writel(m, pHba->post_port);
	wmb();

	return 0;
}

static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);

	printk(KERN_DEBUG"dpti: Could not find task %d in wait queue\n", context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"        %d\n",p1->id);
	}
	return;
}

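/*
 * IOP reset. The 4-byte status buffer is polled until the firmware
 * writes 0x01 (reset in progress) or 0x02 (reset rejected); on the
 * timeout paths the buffer is deliberately not freed, since a late
 * firmware write would corrupt freed memory.
 */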
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies, timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status, 0, 4);

	msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0;
	msg[6] = dma_low(addr);
	msg[7] = dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies, timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies, timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);

		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	adpt_delay(20000);
#endif
	return 0;
}

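/*
 * Walk the firmware's Logical Configuration Table: note the highest
 * channel/target/LUN seen, register an i2o_device for each claimed
 * entry, map bus adapter ports onto pHba->channel[], and build the
 * per-channel adpt_device lists used by adpt_find_device().
 */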
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10];
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i = 0; i < max; i++) {
		if(lct->lct_entry[i].user_tid != 0xfff){
			if(lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			   lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			   lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL){
				continue;
			}
			tid = lct->lct_entry[i].tid;

			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {
				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d == NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;

			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}

			bus_no++;
			if(bus_no >= MAX_CHANNEL) {
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	for(d = pHba->devices; d; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL){

			tid = d->lct_data.tid;
			scsi_id = -1;

			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if(pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for(pDev = pHba->channel[bus_no].device[scsi_id];
					    pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller = pHba;
	d->owner = NULL;
	d->next = pHba->devices;
	d->prev = NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev = d;
	}
	pHba->devices = d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);

	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}

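/*
 * I2OUSRCMD passthrough: copy a user-built I2O message in, bounce each
 * simple SG element through a coherent kernel buffer, post the message,
 * then copy the SG data and the reply frame back out. Note the message
 * size words are in 32-bit units, hence the >>16 and *4 arithmetic.
 */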
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user *user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);

	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4;

	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000;
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1)
		return -EBUSY;

	memset(sg_list, 0, sizeof(sg_list[0])*pHba->sg_tablesize);
	if(sg_offset) {
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name, sg_count);
			kfree(reply);
			return -EINVAL;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;

			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p;

			if(sg[i].flag_count & 0x04000000) {
				if (copy_from_user(p, (void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}

			sg[i].addr_bus = addr;
		}
	}

	do {
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk(KERN_WARNING"adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		u32 j;
		struct sg_simple_element* sg;
		int sg_size;

		memset(&msg, 0, MAX_MESSAGE_SIZE*4);

		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}

		if (copy_from_user(msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			if(!(sg[j].flag_count & 0x4000000)) {
				sg_size = sg[j].flag_count & 0xffffff;

				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus, sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	if (reply_size) {
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}

cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
			(struct sg_simple_element*) (msg + sg_offset);
		kfree(reply);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}
	return rcode;
}

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	si->processorType = PROC_ULTRASPARC;
}
#endif

#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S* si)
{
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}

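/*
 * Character-device ioctl dispatcher for the DPT management interface
 * (signature, controller/system info, blink-LED state, reset, rescan).
 */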
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	while((volatile u32) pHba->state & DPTI_STATE_RESET)
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum = PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
	}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;

		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
	}
	case I2ORESETCMD:
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}

static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch(cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif

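/*
 * Interrupt handler: drain the reply FIFO while I2O_INTERRUPT_PENDING_B
 * is set. Context bit 0x40000000 marks ioctl replies, 0x80000000 marks
 * post-wait completions; anything else is a normal SCSI completion.
 * Each reply MFA is written back to the reply port when done.
 */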
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status = 0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while(readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;

			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m, pHba->reply_port);
				continue;
			}

			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if(context & 0x40000000){
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if(p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
		}
		if(context & 0x80000000){
			status = readl(reply+16);
			if(status >> 24){
				status &= 0xffff;
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				cmd = adpt_cmd_from_context(pHba,
							readl(reply+12));
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else {
			cmd = adpt_cmd_from_context(pHba, readl(reply+12));
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				if(cmd->serial_number != 0) {
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}

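/*
 * Translate a scsi_cmnd into an I2O_CMD_SCSI_EXEC private message: CDB
 * at msg[7], total length word, then one SG element per mapped segment
 * (with a 64-bit SG attribute element prepended on dma64 HBAs); the
 * last element is re-flagged 0xD0000000 for end-of-list.
 */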
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0, sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;
	if(len) {
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir = 0x40000000;
			break;
		case DMA_TO_DEVICE:
			direction = 0x04000000;
			scsidir = 0x80000000;
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir = 0x40000000;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
					pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK << 16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 0;
		}
	}

	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	msg[3] = adpt_cmd_to_context(cmd);

	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;

	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr = msg+7;

	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr += 4;
	lenptr = mptr++;
	if (dpt_dma64(pHba)) {
		reqlen = 16;
		*mptr++ = (0x7C<<24)+(2<<16)+0x02;
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;
	}

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len += sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);

			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
					len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}

static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
	if (host == NULL) {
		printk(KERN_ERR "%s: scsi_host_alloc returned NULL\n", pHba->name);
		return -1;
	}
	host->hostdata[0] = (unsigned long)pHba;
	pHba->host = host;

	host->irq = pHba->pDev->irq;
	host->io_port = 0;
	host->n_io_port = 0;

	host->max_id = 16;
	host->max_lun = 256;
	host->max_channel = pHba->top_scsi_channel + 1;
	host->cmd_per_lun = 1;
	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
	host->sg_tablesize = pHba->sg_tablesize;
	host->can_queue = pHba->post_fifo_size;
	host->use_cmd_list = 1;

	return 0;
}

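/*
 * Decode an I2O reply into midlayer status: map the detailed status
 * codes onto DID_* results, copy up to 40 bytes of autosense on CHECK
 * CONDITION, and treat MSG_FAIL frames as timeouts so the commands get
 * retried.
 */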
2376static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2377{
2378 adpt_hba* pHba;
2379 u32 hba_status;
2380 u32 dev_status;
2381 u32 reply_flags = readl(reply) & 0xff00;
2382
2383
2384
2385 u16 detailed_status = readl(reply+16) &0xffff;
2386 dev_status = (detailed_status & 0xff);
2387 hba_status = detailed_status >> 8;
2388
2389
2390 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2391
2392 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2393
2394 cmd->sense_buffer[0] = '\0';
2395
2396 if(!(reply_flags & MSG_FAIL)) {
2397 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2398 case I2O_SCSI_DSC_SUCCESS:
2399 cmd->result = (DID_OK << 16);
2400
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR << 16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n", pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n", pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
				hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

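		/* Copy over the sense data if this was a check condition */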
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);

			memcpy_fromio(cmd->sense_buffer, (reply+28), len);
			if(cmd->sense_buffer[0] == 0x70 &&
			   cmd->sense_buffer[2] == DATA_PROTECT ){
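				/* Treat DATA_PROTECT (e.g. a failed array member) as a timeout */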
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);
			}
		}
	} else {
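		/*
		 * MSG_FAIL: the IOP could not deliver the message to this
		 * TID. Report a timeout so the midlayer retries.
		 */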
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
	return cmd->result;
}

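/*
 * Re-read the LCT and update device state, under the host lock.
 */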
static s32 adpt_rescan(adpt_hba* pHba)
{
	s32 rcode;
	ulong flags = 0;

	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);
	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
		goto out;
	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
		goto out;
	rcode = 0;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return rcode;
}

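/*
 * Walk a freshly read LCT: install new devices, bring back devices that
 * reappeared, and take devices that vanished offline.
 */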
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10];
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty\n", pHba->name);
		return -1;
	}

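	/* The LCT header is 3 dwords; each entry is 9 dwords */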
	max = lct->table_size;
	max -= 3;
	max /= 9;

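	/* Mark each known device unscanned; devices still present are re-marked below */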
	for (d = pHba->devices; d; d = d->next) {
		pDev = (struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name, max);

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n", pHba->name);
				continue;
			}
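			/* buf[0]: device type in bits 7:0, flags in 15:8, bus number in 31:16 */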
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			pDev = pHba->channel[bus_no].device[scsi_id];

			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
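			/* Not found: this is a new device, build and install it */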
			if(!pDev ) {
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d==NULL) {
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),
						       GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
						kzalloc(sizeof(struct adpt_device),
							GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;

				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			}

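			/* Existing device: mark it scanned and bring it back online if needed */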
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
							pHba->name, bus_no, scsi_id, scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) {
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}

					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
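
	/* Take devices that were not found in the new LCT offline */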
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev = (struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}

		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",
				pHba->name, pDev->scsi_channel, pDev->scsi_id, pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}

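/*
 * Fail every command queued on this controller's devices so that the
 * midlayer will requeue them.
 */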
static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
	struct scsi_cmnd* cmd = NULL;
	struct scsi_device* d = NULL;

	shost_for_each_device(d, pHba->host) {
		unsigned long flags;

		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if(cmd->serial_number == 0){
				continue;
			}
			cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
			cmd->scsi_done(cmd);
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
}

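/*
 * Bring the controller into HOLD state: reset it if necessary, then
 * initialize the outbound queue and read the hardware resource table.
 */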
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 ||
			    pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}
	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

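	/* Now in HOLD state: read the hardware resource table */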
	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}

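/*
 * Bring the controller from HOLD into OPERATIONAL state: send it the
 * system table, then enable it.
 */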
static int adpt_i2o_online_hba(adpt_hba* pHba)
{
	if (adpt_i2o_systab_send(pHba) < 0) {
		adpt_i2o_delete_hba(pHba);
		return -1;
	}

	if (adpt_i2o_enable_hba(pHba) < 0) {
		adpt_i2o_delete_hba(pHba);
		return -1;
	}

	return 0;
}

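/*
 * Post a NOP message, returning an unused message frame to the IOP.
 */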
static s32 adpt_send_nop(adpt_hba* pHba, u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies, timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n", pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0, &msg[1]);
	writel(0, &msg[2]);
	wmb();

	writel(m, pHba->post_port);
	wmb();
	return 0;
}

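/*
 * Initialize the outbound (reply) queue and hand the IOP the bus
 * addresses of our reply frames.
 */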
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies, timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n", pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

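	/*
	 * ExecOutboundInit: msg[4] is the host page frame size and msg[5]
	 * carries the reply frame size in words with the init code (0x80)
	 * in the low byte.
	 */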
	writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);
	writel(4096, &msg[4]);
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);
	writel(0xD0000004, &msg[6]);
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

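	/* Wait for the status byte to come back from the IOP */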
	do {
		if (*status) {
			if (*status != 0x01) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies, timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n", pHba->name);
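			/*
			 * Intentionally leak the 4-byte status buffer: the
			 * controller may still wake up and write to it, so
			 * it cannot safely be freed here.
			 */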
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

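	/* 0x04 == I2O_EXEC_OUTBOUND_INIT_COMPLETE */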
	if(*status != 0x04) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0, pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

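	/* Post every reply frame's bus address to the IOP's reply FIFO */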
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}

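/*
 * Fetch the IOP's status block and use it to size the inbound and
 * outbound FIFOs and the scatter/gather table.
 */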
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block = NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
				"dpti%d: Get Status Block failed; Out of memory.\n",
				pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies + TMOUT_GETSTATUS*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies, timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message!\n",
				pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel(dma_low(pHba->status_block_pa), &msg[6]);
	writel(dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]);

	writel(m, pHba->post_port);
	wmb();

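	/* The IOP sets the last byte of the status block to 0xff when the block is valid */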
	while(status_block[87] != 0xff){
		if(time_after(jiffies, timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

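	/* Size the inbound (post) and outbound (reply) FIFOs */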
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

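	/* Calculate the scatter/gather table size from the inbound frame size */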
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
				- 14 * sizeof(u32))
			/ (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
				- 12 * sizeof(u32))
			/ sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}

#ifdef DEBUG
	printk("dpti%d: State = ", pHba->unit);
	switch(pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n", pHba->status_block->iop_state);
	}
#endif
	return 0;
}

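/*
 * Get the IOP's Logical Configuration Table.
 */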
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
					pHba->lct_size, &pHba->lct_pa,
					GFP_ATOMIC);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;
		msg[5] = 0x00000000;
		msg[6] = 0xD0000000|pHba->lct_size;
		msg[7] = (u32)pHba->lct_pa;

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
				pHba->name, ret);
			printk(KERN_ERR "Adaptec: Error Reading Hardware.\n");
			return ret;
		}

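		/* If the buffer was too small, grow it and retry */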
		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			pHba->lct_size = pHba->lct->table_size << 2;
			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
					pHba->lct, pHba->lct_pa);
			pHba->lct = NULL;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);

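	/* Query scalar group 0x8000 to locate the firmware debug buffer */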
	if(adpt_i2o_query_scalar(pHba, 0, 0x8000, -1, buf, sizeof(buf))>=0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
						pHba->FwDebugBufferSize);
		if (pHba->FwDebugBuffer_P) {
			pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_FLAGS_OFFSET;
			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_BLED_OFFSET;
			pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_STR_LENGTH_OFFSET;
			pHba->FwDebugBuffer_P += buf[2];
			pHba->FwDebugFlags = 0;
		}
	}

	return 0;
}

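/*
 * Build the I2O system table describing every IOP in the system, used
 * to inform the IOPs of each other's existence.
 */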
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +
			(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

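	/* Fill in one entry per controller that answers a status get */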
	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;

		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue;
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1;
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG "sys_tbl_len=%d in 32bit words\n", (sys_tbl_len >> 2));
	for(count = 0; count < (sys_tbl_len >> 2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}

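/*
 * Dump the information block associated with a given unit (TID).
 */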
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0) {
		buf[16] = 0;
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0) {
		buf[16] = 0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0) {
		buf[8] = 0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if(d->lct_data.device_flags&(1<<0))
		printk("C");
	if(d->lct_data.device_flags&(1<<1))
		printk("U");
	if(!(d->lct_data.device_flags&(1<<4)))
		printk("P");
	if(!(d->lct_data.device_flags&(1<<5)))
		printk("M");
	printk("\n");
#endif
}

#ifdef DEBUG
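/*
 * Return a printable name for an I2O class code.
 */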
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch(class&0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif

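/*
 * Read the IOP's Hardware Resource Table, growing the buffer and
 * retrying if the initial allocation is too small.
 */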
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0] = SIX_WORD_MSG_SIZE | SGL_OFFSET_4;
		msg[1] = I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = (0xD0000000 | size);
		msg[5] = (u32)pHba->hrt_pa;

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}

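/*
 * Query one scalar group value, or a whole scalar group when field == -1.
 */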
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
			int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;
	int size;

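	/* The result block needs 8 extra bytes for its header */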
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
			pHba->name);
		return -ENOMEM;
	}
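	/* A field of -1 requests the whole scalar group */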
	if (field == -1)
		opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
		opblk_va, opblk_pa, sizeof(opblk),
		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		return -ETIME;
	} else if (size == -EINTR) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		return -EINTR;
	}

	memcpy(buf, resblk_va+8, buflen);

	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
		resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}

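/*
 * Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET.
 *
 * The operation block is passed in the opblk buffer and the results are
 * returned in the resblk buffer. The minimum sized resblk is 8 bytes and
 * contains ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */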
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		void *opblk_va, dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk(KERN_WARNING "adpt_i2o_issue_params: post_wait failed (%p)\n",
			resblk_va);
		return wait_status;
	}

	if (res[1]&0x00FF0000) {
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF);
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2);
}

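/*
 * Quiesce the controller. The IOP discards SysQuiesce unless it is in
 * READY or OPERATIONAL state.
 */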
static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO "dpti%d: Unable to quiesce (status=%#x).\n",
			pHba->unit, -ret);
	} else {
		printk(KERN_INFO "dpti%d: Quiesced.\n", pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}

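/*
 * Enable the IOP, allowing it to resume external operations.
 */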
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if(!pHba->status_block){
		return -ENOMEM;
	}

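	/* Already OPERATIONAL is a no-op; anything other than READY is an error */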
	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}

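/*
 * Send the system table to this IOP.
 */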
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12);
	msg[5] = 0;
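	/*
	 * Provide three SGL elements: the system table, a (null) private
	 * memory space declaration, and a (null) private I/O space
	 * declaration.
	 */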
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}

#ifdef UARTDELAY

static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);
	}
}

#endif

static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.show_info		= adpt_show_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
	.use_clustering		= ENABLE_CLUSTERING,
};

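/*
 * Detect all controllers, then register and scan each Scsi_Host.
 */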
static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}

static void __exit adpt_exit(void)
{
	adpt_hba *pHba, *next;

	for (pHba = hba_chain; pHba; pHba = pHba->next)
		scsi_remove_host(pHba->host);
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba->host);
	}
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");