1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32#include <linux/module.h>
33
34MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
35MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
36
37
38
39#include <linux/ioctl.h>
40#include <asm/uaccess.h>
41
42#include <linux/stat.h>
43#include <linux/slab.h>
44#include <linux/pci.h>
45#include <linux/proc_fs.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h>
48#include <linux/interrupt.h>
49#include <linux/kernel.h>
50#include <linux/sched.h>
51#include <linux/reboot.h>
52#include <linux/spinlock.h>
53#include <linux/dma-mapping.h>
54
55#include <linux/timer.h>
56#include <linux/string.h>
57#include <linux/ioport.h>
58#include <linux/mutex.h>
59
60#include <asm/processor.h>
61#include <asm/pgtable.h>
62#include <asm/io.h>
63
64#include <scsi/scsi.h>
65#include <scsi/scsi_cmnd.h>
66#include <scsi/scsi_device.h>
67#include <scsi/scsi_host.h>
68#include <scsi/scsi_tcq.h>
69
70#include "dpt/dptsig.h"
71#include "dpti.h"
72
73
74
75
76
77
/* Serializes the character-device open/ioctl entry points. */
static DEFINE_MUTEX(adpt_mutex);

/*
 * DPT driver signature block, read by Adaptec management tools to
 * identify the driver, its target CPU family, and its version.  The
 * processor fields are chosen at compile time from the build arch.
 */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),	/* unknown architecture */
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
96
97
98
99
100
101
102
103
104
/* Protects the global controller chain (hba_chain/hba_count). */
static DEFINE_MUTEX(adpt_configuration_lock);

/* I2O system table shared with the IOPs, plus its DMA address/indices. */
static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

/* Singly linked list of all detected controllers, and its length. */
static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

/* Sysfs class under which per-controller /dev nodes are created. */
static struct class *adpt_sysfs_class;
116
static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

/* Character-device entry points for the management interface. */
static const struct file_operations adpt_fops = {
	.unlocked_ioctl = adpt_unlocked_ioctl,
	.open = adpt_open,
	.release = adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_adpt_ioctl,
#endif
	.llseek = noop_llseek,
};
131
132
133
134
/*
 * Bookkeeping node for a synchronous message post: the poster sleeps on
 * *wq until the reply interrupt fills in status and wakes it.
 */
struct adpt_i2o_post_wait_data
{
	int status;	/* completion status written by the reply handler */
	u32 id;		/* matches the context stashed in msg[2] */
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

/* Outstanding synchronous posts, guarded by adpt_post_wait_lock. */
static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;	/* 15-bit rolling id generator */
static DEFINE_SPINLOCK(adpt_post_wait_lock);
146
147
148
149
150
151
152
/* True when dma_addr_t is 64-bit AND this HBA negotiated 64-bit DMA. */
static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}
157
/* Upper 32 bits of a DMA address (0 on 32-bit dma_addr_t builds). */
static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}
162
/* Lower 32 bits of a DMA address. */
static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
167
168static u8 adpt_read_blink_led(adpt_hba* host)
169{
170 if (host->FwDebugBLEDflag_P) {
171 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
172 return readb(host->FwDebugBLEDvalue_P);
173 }
174 }
175 return 0;
176}
177
178
179
180
181
182
/* PCI IDs handled by this driver: original DPT I2O boards and Raptor. */
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }	/* terminator */
};
MODULE_DEVICE_TABLE(pci,dptids);
189
/*
 * Old-style host detection entry point: find every DPT/Adaptec I2O
 * board on the PCI bus, bring each one through the I2O init sequence
 * (activate -> build system table -> online -> read/parse LCT),
 * register the Scsi_Hosts, sysfs nodes, and the management chardev.
 * Returns the number of controllers successfully set up.
 */
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			/* hold a reference for the lifetime of the hba */
			pci_dev_get(pDev);
		}
	}

	/* activate each IOP; failures drop the hba from the chain */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}

	/*
	 * Any failure while onlining an IOP deletes it and forces a
	 * rebuild of the shared system table, since table entries still
	 * reference the deleted IOP.
	 */
rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* move every IOP from HOLD to OPERATIONAL */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;	/* system table is now stale */
		}
	}

	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	/* the sysfs class is optional; losing it only costs the /dev nodes */
	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;	/* open for commands */
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	/* register the shared control chardev; failure aborts everything */
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
300
301
302
303
304
305static int adpt_release(struct Scsi_Host *host)
306{
307 adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
308
309 adpt_i2o_delete_hba(pHba);
310 scsi_unregister(host);
311 return 0;
312}
313
314
/*
 * Issue a SCSI INQUIRY to the adapter itself (I2O SCSI_EXEC addressed
 * to ADAPTER_TID) and build pHba->detail ("Vendor ... Model ... FW ...")
 * from the response.  On failure, falls back to a generic label.
 */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	/* 80 bytes allocated although only 36 are requested/used */
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;		/* standard INQUIRY response length */
	direction = 0x00000000;
	scsidir =0x40000000;	/* data in (device -> host) */

	/* a 64-bit SG element needs three extra message words */
	if (dpt_dma64(pHba))
		reqlen = 17;	/* single SGE, 64 bit */
	else
		reqlen = 14;	/* single SGE, 32 bit */

	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	/* Adaptec/DPT private command */
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* interpret */;
	/* direction | disconnect-ok | simple queue | CDB length (6) */
	msg[6] = scsidir|0x20a00000| 6 /* cdb len */;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	/* the CDB always occupies a 16-byte block in the message */
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* allocation length */
	scb[5] = 0;
	/* rest of scb is don't-care */

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;	/* remember where the byte count goes */

	/* now fill in the SG list */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* enable 64-bit SG */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	/* post synchronously with a 120 second timeout */
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		/* on timeout/interrupt the IOP may still DMA into buf, so
		 * the buffer is deliberately leaked rather than reused */
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		/* assemble the detail string from the INQUIRY response */
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);	/* product id */
		/* NOTE(review): copies only 4 of the 5 chars of " FW: " */
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);	/* revision */
		pHba->detail[48] = '\0';	/* ensure termination */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
408
409
410static int adpt_slave_configure(struct scsi_device * device)
411{
412 struct Scsi_Host *host = device->host;
413 adpt_hba* pHba;
414
415 pHba = (adpt_hba *) host->hostdata[0];
416
417 if (host->can_queue && device->tagged_supported) {
418 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
419 host->can_queue - 1);
420 } else {
421 scsi_adjust_queue_depth(device, 0, 1);
422 }
423 return 0;
424}
425
/*
 * Translate and post one mid-layer SCSI command to the firmware.
 * Called by the DEF_SCSI_QCMD wrapper with the host lock held.
 */
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* per-device driver data */

	cmd->scsi_done = done;

	/*
	 * REQUEST_SENSE is handled automatically by the adapter on errors,
	 * so only pass one through when the sense buffer is still clean
	 * (no error pending); otherwise just complete it as OK.
	 */
	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * While an ioctl or reset is in flight, refuse new commands and
	 * nudge the mid-layer's reset bookkeeping so it retries later.
	 * NOTE(review): returning 1 here relies on the midlayer treating a
	 * nonzero queuecommand return as "busy, requeue" -- confirm.
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	/* First command for this scsi_device: look up (and cache) our
	 * adpt_device for its channel/id/lun. */
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			/* unknown address: fail the command immediately */
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If the device is mid-reset, do not accept the command now;
	 * the mid-layer will retry it later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}
493
/* Wrap adpt_queue_lck with the host-lock handling the midlayer expects. */
static DEF_SCSI_QCMD(adpt_queue)
495
/*
 * Report a default disk geometry based on capacity tiers, with a fixed
 * override for CD-ROM devices (sdev->type == 5).
 */
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	/* pick heads/sectors by capacity tier (sector counts) */
	if (capacity < 0x2000 ) {	/* < 0x2000: floppy-sized */
		heads = 18;
		sectors = 2;
	}
	/* 0x2000 .. 0x20000 */
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	/* 0x20000 .. 0x40000 */
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	/* 0x40000 .. 0x80000 */
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	/* >= 0x80000 */
	else {
		heads = 255;
		sectors = 63;
	}
	/* NOTE(review): sector_div() divides capacity in place and returns
	 * the REMAINDER, so cylinders receives the remainder here rather
	 * than the quotient -- looks wrong; confirm intent before changing. */
	cylinders = sector_div(capacity, heads * sectors);

	/* fixed geometry for CD-ROM */
	if(sdev->type == 5) {
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
546
547
548static const char *adpt_info(struct Scsi_Host *host)
549{
550 adpt_hba* pHba;
551
552 pHba = (adpt_hba *) host->hostdata[0];
553 return (char *) (pHba->detail);
554}
555
/*
 * /proc/scsi show routine: print the driver banner, per-HBA info, and
 * every known device on every channel of the matching controller.
 */
static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	/* find the hba that owns this Scsi_Host */
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;	/* not one of ours; nothing to show */
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_printf(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {	/* walk every LUN chained at this id */
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}
602
603
604
605
/* Use the command's serial number (truncated to 32 bits) as the I2O
 * transaction context; 0 means "no command". */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}
610
611
612
613
614
/*
 * Map a 32-bit I2O reply context back to the in-flight scsi_cmnd whose
 * serial number matches.  The host lock is dropped while scanning the
 * per-device command lists (shost_for_each_device may sleep) and
 * re-taken before returning.  Returns NULL when no command matches.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);	/* drop the iterator's ref */
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}
642
643
644
645
/*
 * Turn an ioctl reply-buffer pointer into a 32-bit message context.
 * On 32-bit kernels the pointer itself fits; on 64-bit kernels the
 * pointer is parked in a fixed-size per-hba table and the slot index is
 * used.  Returns (u32)-1 (and frees the buffer) when the table is full.
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		/* no free slot: give up and release the reply buffer */
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}
673
674
675
676
/*
 * Inverse of adpt_ioctl_to_context: recover the reply-buffer pointer
 * and (on 64-bit kernels) release its table slot.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;	/* free the slot */

	return p;
#endif
}
688
689
690
691
692
693
/*
 * eh_abort_handler: ask the firmware to abort one outstanding command
 * (identified by its context) via I2O_CMD_SCSI_ABORT.
 */
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;
	struct adpt_device* dptdevice;	/* per-device driver data */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;	/* command never reached the adapter */
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	msg[4] = adpt_cmd_to_context(cmd);	/* which command to abort */
	/* post_wait drops/retakes the host lock internally, so acquire it
	 * here to present a consistent lock state */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}
733
734
#define I2O_DEVICE_RESET 0x27	/* vendor reset command code */

/*
 * eh_device_reset_handler: ask the firmware to reset one target device.
 * The device is flagged DPTI_DEV_RESET for the duration so the queue
 * path holds off new commands.
 */
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	/* mark the device in-reset while the request is outstanding */
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
779
780
#define I2O_HBA_BUS_RESET 0x87	/* vendor bus reset command code */

/* eh_bus_reset_handler: reset the SCSI bus the command's device is on. */
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	/* address the message to the channel's bus adapter TID */
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	/* post_wait drops/retakes the host lock internally */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
809
810
811static int __adpt_reset(struct scsi_cmnd* cmd)
812{
813 adpt_hba* pHba;
814 int rcode;
815 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
816 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
817 rcode = adpt_hba_reset(pHba);
818 if(rcode == 0){
819 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
820 return SUCCESS;
821 } else {
822 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
823 return FAILED;
824 }
825}
826
827static int adpt_reset(struct scsi_cmnd* cmd)
828{
829 int rc;
830
831 spin_lock_irq(cmd->device->host->host_lock);
832 rc = __adpt_reset(cmd);
833 spin_unlock_irq(cmd->device->host->host_lock);
834
835 return rc;
836}
837
838
/*
 * Re-run the full I2O bring-up sequence for one controller (activate ->
 * system table -> online -> LCT reread/reparse).  Any step failing
 * deletes the hba entirely.  On success, commands that were posted
 * before the reset are failed back to the mid-layer for retry.
 */
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	/* block the queue path while the reset is in progress */
	pHba->state |= DPTI_STATE_RESET;

	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);	/* unrecoverable: drop the hba */
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;	/* accept commands again */

	/* bounce everything that was in flight across the reset */
	adpt_fail_posted_scbs(pHba);
	return 0;
}
878
879
880
881
882
883
884
/*
 * Global teardown: delete every controller on the chain, then free any
 * nodes still sitting on the post-wait queue (e.g. abandoned timeouts).
 */
static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");

	/* delete all IOPs from the controller chain; the scsi-core should
	 * already have released its hosts */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* nothing should be outstanding at this point, so any remaining
	 * wait-queue entries can simply be freed */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}

	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
916
917static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
918{
919
920 adpt_hba* pHba = NULL;
921 adpt_hba* p = NULL;
922 ulong base_addr0_phys = 0;
923 ulong base_addr1_phys = 0;
924 u32 hba_map0_area_size = 0;
925 u32 hba_map1_area_size = 0;
926 void __iomem *base_addr_virt = NULL;
927 void __iomem *msg_addr_virt = NULL;
928 int dma64 = 0;
929
930 int raptorFlag = FALSE;
931
932 if(pci_enable_device(pDev)) {
933 return -EINVAL;
934 }
935
936 if (pci_request_regions(pDev, "dpt_i2o")) {
937 PERROR("dpti: adpt_config_hba: pci request region failed\n");
938 return -EINVAL;
939 }
940
941 pci_set_master(pDev);
942
943
944
945
946 if (sizeof(dma_addr_t) > 4 &&
947 pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
948 if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
949 dma64 = 1;
950 }
951 if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
952 return -EINVAL;
953
954
955 pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));
956
957 base_addr0_phys = pci_resource_start(pDev,0);
958 hba_map0_area_size = pci_resource_len(pDev,0);
959
960
961 if(pDev->device == PCI_DPT_DEVICE_ID){
962 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
963
964 hba_map0_area_size = 0x400000;
965 } else {
966 if(hba_map0_area_size > 0x100000 ){
967 hba_map0_area_size = 0x100000;
968 }
969 }
970 } else {
971
972 base_addr1_phys = pci_resource_start(pDev,1);
973 hba_map1_area_size = pci_resource_len(pDev,1);
974 raptorFlag = TRUE;
975 }
976
977#if BITS_PER_LONG == 64
978
979
980
981
982
983
984
985 if (raptorFlag == TRUE) {
986 if (hba_map0_area_size > 128)
987 hba_map0_area_size = 128;
988 if (hba_map1_area_size > 524288)
989 hba_map1_area_size = 524288;
990 } else {
991 if (hba_map0_area_size > 524288)
992 hba_map0_area_size = 524288;
993 }
994#endif
995
996 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
997 if (!base_addr_virt) {
998 pci_release_regions(pDev);
999 PERROR("dpti: adpt_config_hba: io remap failed\n");
1000 return -EINVAL;
1001 }
1002
1003 if(raptorFlag == TRUE) {
1004 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
1005 if (!msg_addr_virt) {
1006 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
1007 iounmap(base_addr_virt);
1008 pci_release_regions(pDev);
1009 return -EINVAL;
1010 }
1011 } else {
1012 msg_addr_virt = base_addr_virt;
1013 }
1014
1015
1016 pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
1017 if (!pHba) {
1018 if (msg_addr_virt != base_addr_virt)
1019 iounmap(msg_addr_virt);
1020 iounmap(base_addr_virt);
1021 pci_release_regions(pDev);
1022 return -ENOMEM;
1023 }
1024
1025 mutex_lock(&adpt_configuration_lock);
1026
1027 if(hba_chain != NULL){
1028 for(p = hba_chain; p->next; p = p->next);
1029 p->next = pHba;
1030 } else {
1031 hba_chain = pHba;
1032 }
1033 pHba->next = NULL;
1034 pHba->unit = hba_count;
1035 sprintf(pHba->name, "dpti%d", hba_count);
1036 hba_count++;
1037
1038 mutex_unlock(&adpt_configuration_lock);
1039
1040 pHba->pDev = pDev;
1041 pHba->base_addr_phys = base_addr0_phys;
1042
1043
1044 pHba->base_addr_virt = base_addr_virt;
1045 pHba->msg_addr_virt = msg_addr_virt;
1046 pHba->irq_mask = base_addr_virt+0x30;
1047 pHba->post_port = base_addr_virt+0x40;
1048 pHba->reply_port = base_addr_virt+0x44;
1049
1050 pHba->hrt = NULL;
1051 pHba->lct = NULL;
1052 pHba->lct_size = 0;
1053 pHba->status_block = NULL;
1054 pHba->post_count = 0;
1055 pHba->state = DPTI_STATE_RESET;
1056 pHba->pDev = pDev;
1057 pHba->devices = NULL;
1058 pHba->dma64 = dma64;
1059
1060
1061 spin_lock_init(&pHba->state_lock);
1062 spin_lock_init(&adpt_post_wait_lock);
1063
1064 if(raptorFlag == 0){
1065 printk(KERN_INFO "Adaptec I2O RAID controller"
1066 " %d at %p size=%x irq=%d%s\n",
1067 hba_count-1, base_addr_virt,
1068 hba_map0_area_size, pDev->irq,
1069 dma64 ? " (64-bit DMA)" : "");
1070 } else {
1071 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1072 hba_count-1, pDev->irq,
1073 dma64 ? " (64-bit DMA)" : "");
1074 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1075 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1076 }
1077
1078 if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1079 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1080 adpt_i2o_delete_hba(pHba);
1081 return -EINVAL;
1082 }
1083
1084 return 0;
1085}
1086
1087
/*
 * Unlink a controller from the global chain and free everything it
 * owns: irq, BAR mappings, DMA-coherent tables (HRT/LCT/status/reply
 * pool), device structures, and the sysfs node.  When the last hba is
 * gone, the control chardev and sysfs class are torn down as well.
 */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;

	mutex_lock(&adpt_configuration_lock);

	/* the irq was registered against pHba, not the host */
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	/* unlink from the singly-linked chain */
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);	/* split-BAR Raptor mapping */
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	/* free the flat i2o device list */
	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	/* free every LUN chain on every channel/id */
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);	/* drop the ref taken at detect time */
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	/* last controller gone: remove the chardev and sysfs class */
	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}
1175
1176static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1177{
1178 struct adpt_device* d;
1179
1180 if(chan < 0 || chan >= MAX_CHANNEL)
1181 return NULL;
1182
1183 if( pHba->channel[chan].device == NULL){
1184 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1185 return NULL;
1186 }
1187
1188 d = pHba->channel[chan].device[id];
1189 if(!d || d->tid == 0) {
1190 return NULL;
1191 }
1192
1193
1194 if(d->scsi_lun == lun){
1195 return d;
1196 }
1197
1198
1199 for(d=d->next_lun ; d ; d = d->next_lun){
1200 if(d->scsi_lun == lun){
1201 return d;
1202 }
1203 }
1204 return NULL;
1205}
1206
1207
/*
 * Post an I2O message and sleep until the reply interrupt completes it
 * (timeout in seconds; 0 waits forever).  Returns 0 on success, or
 * -ENOMEM / -ETIMEDOUT (no message frame) / -ETIME (reply overdue) /
 * -EOPNOTSUPP (firmware rejected the function).
 */
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * Lock so nobody else touches the queue pointers or the rolling
	 * id while we allocate a new id and push our node.
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);

	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;	/* ids are 15 bits; bit 31 flags the context */
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;	/* until the reply overwrites it */

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	/* stash our id in the message's initiator context */
	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				/* I/O was issued but no reply arrived in
				 * time; freeing resources now would be
				 * dangerous (the IOP may still reply). */
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		/* wait_data stays queued; it is freed at shutdown */
		return status;
	}

	/* unlink our node from the post-wait queue */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
1293
1294
/*
 * Grab a free inbound message frame from the post FIFO (polling up to
 * 30 seconds), copy the message into it, and post it to the IOP.
 * Returns 0 on success or -ETIMEDOUT when no frame becomes free.
 */
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		/* reading the post port yields a free frame offset, or
		 * EMPTY_QUEUE when none is available */
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	/* writing the offset back posts the message */
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
1324
1325
/*
 * Reply-interrupt side of adpt_i2o_post_wait(): find the waiter whose
 * id matches the low 15 bits of the context, record the status, and
 * wake it.  A missing match means the waiter already timed out and
 * returned; all we can do then is log it.
 */
static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;

	context &= 0x7fff;	/* strip the flag bit set when posting */

	/* lock out concurrent queue manipulation while searching */
	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	/* reply for a waiter that is no longer queued: log for debugging */
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG" Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG" %d\n",p1->id);
	}
	return;
}
1362
/*
 * Issue an I2O ADAPTER_RESET and wait for the firmware to acknowledge
 * through a 4-byte DMA status word.  Quiesces the IOP first unless
 * this is the initial reset during probe.
 */
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	/* first-time reset should be quick */
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	/* poll for a free inbound message frame */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);	/* hand the claimed frame back */
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);	/* where the IOP writes its reset status */
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	/* firmware writes 0x01 (in progress) or 0x02+ into *status */
	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* NOTE(review): the 4-byte status buffer is deliberately
			 * not freed on this path -- the IOP may still DMA into
			 * it after we give up. */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /* reset in progress */) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		/* a message frame becoming available signals that the
		 * reset has finished */
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* status buffer leaked here too -- see above */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		/* flush the frame we just claimed */
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	/* wait for the 480 to clear its uart */
	adpt_delay(20000);
#endif
	return 0;
}
1465
1466
/*
 * Walk the controller's Logical Configuration Table (LCT) and build the
 * driver's view of the attached buses and devices:
 *
 *  pass 1: create an i2o_device for every entry claimed by the host
 *          (user_tid == 0xfff) and record top channel/id/lun bounds for
 *          storage-class entries owned by someone else;
 *  pass 2: number the bus-adapter / FC ports into pHba->channel[];
 *  pass 3: create an adpt_device (per-LUN chain) for each storage-class
 *          device and link it under its channel/id slot.
 *
 * Returns 0 on success, -1 if there is no LCT, -ENOMEM on allocation
 * failure.
 */
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10];
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* Entry count: table_size is in dwords; 3-dword header, 9-dword
	 * entries. */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/* Entry not claimed by the host.  Still track the
			 * highest channel/id/lun for storage-class
			 * devices so host limits are sized correctly. */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			/* Group 0x8000: device bus/id/lun address. */
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			if(bus_no >= MAX_CHANNEL) {
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		/* Host-owned entry: create and register an i2o_device. */
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	/* Pass 2: assign consecutive channel numbers to port devices. */
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			/* Group 0x0200: port's own SCSI id. */
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	/* Pass 3: hang an adpt_device off each channel/id slot for every
	 * storage-class device; extra LUNs chain via next_lun. */
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;

			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = (buf[2]>>8 )&0xff;
				if(bus_no >= MAX_CHANNEL) {
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* Slot occupied: append to the
					 * LUN chain. */
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				/* buf[0] low byte = device type, next
				 * byte = flags (per group 0x8000). */
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}
1631
1632
1633
1634
1635
1636
1637
1638static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1639{
1640 mutex_lock(&adpt_configuration_lock);
1641 d->controller=pHba;
1642 d->owner=NULL;
1643 d->next=pHba->devices;
1644 d->prev=NULL;
1645 if (pHba->devices != NULL){
1646 pHba->devices->prev=d;
1647 }
1648 pHba->devices=d;
1649 *d->dev_name = 0;
1650
1651 mutex_unlock(&adpt_configuration_lock);
1652 return 0;
1653}
1654
1655static int adpt_open(struct inode *inode, struct file *file)
1656{
1657 int minor;
1658 adpt_hba* pHba;
1659
1660 mutex_lock(&adpt_mutex);
1661
1662
1663 minor = iminor(inode);
1664 if (minor >= hba_count) {
1665 mutex_unlock(&adpt_mutex);
1666 return -ENXIO;
1667 }
1668 mutex_lock(&adpt_configuration_lock);
1669 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1670 if (pHba->unit == minor) {
1671 break;
1672 }
1673 }
1674 if (pHba == NULL) {
1675 mutex_unlock(&adpt_configuration_lock);
1676 mutex_unlock(&adpt_mutex);
1677 return -ENXIO;
1678 }
1679
1680
1681
1682
1683
1684
1685 pHba->in_use = 1;
1686 mutex_unlock(&adpt_configuration_lock);
1687 mutex_unlock(&adpt_mutex);
1688
1689 return 0;
1690}
1691
1692static int adpt_close(struct inode *inode, struct file *file)
1693{
1694 int minor;
1695 adpt_hba* pHba;
1696
1697 minor = iminor(inode);
1698 if (minor >= hba_count) {
1699 return -ENXIO;
1700 }
1701 mutex_lock(&adpt_configuration_lock);
1702 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1703 if (pHba->unit == minor) {
1704 break;
1705 }
1706 }
1707 mutex_unlock(&adpt_configuration_lock);
1708 if (pHba == NULL) {
1709 return -ENXIO;
1710 }
1711
1712 pHba->in_use = 0;
1713
1714 return 0;
1715}
1716
1717
1718static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1719{
1720 u32 msg[MAX_MESSAGE_SIZE];
1721 u32* reply = NULL;
1722 u32 size = 0;
1723 u32 reply_size = 0;
1724 u32 __user *user_msg = arg;
1725 u32 __user * user_reply = NULL;
1726 void *sg_list[pHba->sg_tablesize];
1727 u32 sg_offset = 0;
1728 u32 sg_count = 0;
1729 int sg_index = 0;
1730 u32 i = 0;
1731 u32 rcode = 0;
1732 void *p = NULL;
1733 dma_addr_t addr;
1734 ulong flags = 0;
1735
1736 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1737
1738 if(get_user(size, &user_msg[0])){
1739 return -EFAULT;
1740 }
1741 size = size>>16;
1742
1743 user_reply = &user_msg[size];
1744 if(size > MAX_MESSAGE_SIZE){
1745 return -EFAULT;
1746 }
1747 size *= 4;
1748
1749
1750 if(copy_from_user(msg, user_msg, size)) {
1751 return -EFAULT;
1752 }
1753 get_user(reply_size, &user_reply[0]);
1754 reply_size = reply_size>>16;
1755 if(reply_size > REPLY_FRAME_SIZE){
1756 reply_size = REPLY_FRAME_SIZE;
1757 }
1758 reply_size *= 4;
1759 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1760 if(reply == NULL) {
1761 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1762 return -ENOMEM;
1763 }
1764 sg_offset = (msg[0]>>4)&0xf;
1765 msg[2] = 0x40000000;
1766 msg[3] = adpt_ioctl_to_context(pHba, reply);
1767 if (msg[3] == (u32)-1)
1768 return -EBUSY;
1769
1770 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1771 if(sg_offset) {
1772
1773 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1774 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1775 if (sg_count > pHba->sg_tablesize){
1776 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1777 kfree (reply);
1778 return -EINVAL;
1779 }
1780
1781 for(i = 0; i < sg_count; i++) {
1782 int sg_size;
1783
1784 if (!(sg[i].flag_count & 0x10000000 )) {
1785 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1786 rcode = -EINVAL;
1787 goto cleanup;
1788 }
1789 sg_size = sg[i].flag_count & 0xffffff;
1790
1791 p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1792 if(!p) {
1793 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1794 pHba->name,sg_size,i,sg_count);
1795 rcode = -ENOMEM;
1796 goto cleanup;
1797 }
1798 sg_list[sg_index++] = p;
1799
1800 if(sg[i].flag_count & 0x04000000 ) {
1801
1802 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1803 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1804 rcode = -EFAULT;
1805 goto cleanup;
1806 }
1807 }
1808
1809 sg[i].addr_bus = addr;
1810 }
1811 }
1812
1813 do {
1814 if(pHba->host)
1815 spin_lock_irqsave(pHba->host->host_lock, flags);
1816
1817
1818
1819
1820
1821 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1822 if (rcode != 0)
1823 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1824 rcode, reply);
1825
1826 if(pHba->host)
1827 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1828 } while(rcode == -ETIMEDOUT);
1829
1830 if(rcode){
1831 goto cleanup;
1832 }
1833
1834 if(sg_offset) {
1835
1836 u32 j;
1837
1838 struct sg_simple_element* sg;
1839 int sg_size;
1840
1841
1842 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1843
1844 if(get_user(size, &user_msg[0])){
1845 rcode = -EFAULT;
1846 goto cleanup;
1847 }
1848 size = size>>16;
1849 size *= 4;
1850 if (size > MAX_MESSAGE_SIZE) {
1851 rcode = -EINVAL;
1852 goto cleanup;
1853 }
1854
1855 if (copy_from_user (msg, user_msg, size)) {
1856 rcode = -EFAULT;
1857 goto cleanup;
1858 }
1859 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1860
1861
1862 sg = (struct sg_simple_element*)(msg + sg_offset);
1863 for (j = 0; j < sg_count; j++) {
1864
1865 if(! (sg[j].flag_count & 0x4000000 )) {
1866 sg_size = sg[j].flag_count & 0xffffff;
1867
1868 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1869 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1870 rcode = -EFAULT;
1871 goto cleanup;
1872 }
1873 }
1874 }
1875 }
1876
1877
1878 if (reply_size) {
1879
1880 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1881 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1882 rcode = -EFAULT;
1883 }
1884 if(copy_to_user(user_reply, reply, reply_size)) {
1885 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1886 rcode = -EFAULT;
1887 }
1888 }
1889
1890
1891cleanup:
1892 if (rcode != -ETIME && rcode != -EINTR) {
1893 struct sg_simple_element *sg =
1894 (struct sg_simple_element*) (msg +sg_offset);
1895 kfree (reply);
1896 while(sg_index) {
1897 if(sg_list[--sg_index]) {
1898 dma_free_coherent(&pHba->pDev->dev,
1899 sg[sg_index].flag_count & 0xffffff,
1900 sg_list[sg_index],
1901 sg[sg_index].addr_bus);
1902 }
1903 }
1904 }
1905 return rcode;
1906}
1907
1908#if defined __ia64__
/* Fill in the DPT processor-type code for IA-64 builds. */
static void adpt_ia64_info(sysInfo_S* si)
{
	si->processorType = PROC_IA64;
}
1916#endif
1917
1918#if defined __sparc__
/* Fill in the DPT processor-type code for SPARC builds. */
static void adpt_sparc_info(sysInfo_S* si)
{
	si->processorType = PROC_ULTRASPARC;
}
1926#endif
1927#if defined __alpha__
/* Fill in the DPT processor-type code for Alpha builds. */
static void adpt_alpha_info(sysInfo_S* si)
{
	si->processorType = PROC_ALPHA;
}
1935#endif
1936
1937#if defined __i386__
1938static void adpt_i386_info(sysInfo_S* si)
1939{
1940
1941
1942
1943 switch (boot_cpu_data.x86) {
1944 case CPU_386:
1945 si->processorType = PROC_386;
1946 break;
1947 case CPU_486:
1948 si->processorType = PROC_486;
1949 break;
1950 case CPU_586:
1951 si->processorType = PROC_PENTIUM;
1952 break;
1953 default:
1954 si->processorType = PROC_PENTIUM;
1955 break;
1956 }
1957}
1958#endif
1959
1960
1961
1962
1963
1964
1965
/*
 * Fill a sysInfo_S with static host information (OS, bus, processor
 * family/type) and copy it to the user buffer for the DPT_SYSINFO ioctl.
 * Returns 0 on success, -EFAULT if the copy-out fails.
 */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	/* OS version fields are not filled in by this driver. */
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

	/* Per-architecture processor type; 0xff when unknown. */
#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
1997
/*
 * Main ioctl dispatcher for the dpti character device.  Looks up the
 * adapter by device minor, waits out any in-progress reset, then
 * services the DPT/I2O management commands.
 *
 * Returns 0 on success or a negative errno.
 */
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	/* Find the adapter whose unit matches the minor. */
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	/* Busy-wait (sleeping) until any reset in progress finishes.
	 * NOTE(review): the (volatile u32) cast is a re-read hint only;
	 * there is no locking on pHba->state here. */
	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	/* Return the driver signature block. */
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	/* Pass an arbitrary I2O message through to the controller. */
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA 0x0002
#define FLG_OSD_I2O 0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	/* Static host/OS information. */
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	/* Current blink-LED (fault) code, 0 if none. */
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	/* Force an adapter reset under the host lock. */
	case I2ORESETCMD:
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	/* Re-read the LCT and update the device lists. */
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
2080
2081static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2082{
2083 struct inode *inode;
2084 long ret;
2085
2086 inode = file_inode(file);
2087
2088 mutex_lock(&adpt_mutex);
2089 ret = adpt_ioctl(inode, file, cmd, arg);
2090 mutex_unlock(&adpt_mutex);
2091
2092 return ret;
2093}
2094
2095#ifdef CONFIG_COMPAT
2096static long compat_adpt_ioctl(struct file *file,
2097 unsigned int cmd, unsigned long arg)
2098{
2099 struct inode *inode;
2100 long ret;
2101
2102 inode = file_inode(file);
2103
2104 mutex_lock(&adpt_mutex);
2105
2106 switch(cmd) {
2107 case DPT_SIGNATURE:
2108 case I2OUSRCMD:
2109 case DPT_CTRLINFO:
2110 case DPT_SYSINFO:
2111 case DPT_BLINKLED:
2112 case I2ORESETCMD:
2113 case I2ORESCANCMD:
2114 case (DPT_TARGET_BUSY & 0xFFFF):
2115 case DPT_TARGET_BUSY:
2116 ret = adpt_ioctl(inode, file, cmd, arg);
2117 break;
2118 default:
2119 ret = -ENOIOCTLCMD;
2120 }
2121
2122 mutex_unlock(&adpt_mutex);
2123
2124 return ret;
2125}
2126#endif
2127
/*
 * Interrupt handler: drain the reply FIFO while the controller reports
 * a pending interrupt.  Each reply is routed by its context word:
 * bit 30 = ioctl reply (copy frame to the waiting ioctl buffer),
 * bit 31 = post_wait reply (complete the waiter), otherwise a SCSI
 * command completion.
 */
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			/* Retry once after a read barrier before giving
			 * up on this interrupt. */
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		/* Translate the bus address back to our reply pool;
		 * fall back to bus_to_virt for frames outside it. */
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			/* Failed frame: recover the original context
			 * from the preserved message and return the
			 * frame to the IOP with a NOP. */
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if(context & 0x40000000){
			/* ioctl reply: copy the frame into the buffer
			 * registered by adpt_i2o_passthru(). */
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
		}
		if(context & 0x80000000){
			/* post_wait reply: extract the detailed status
			 * (upper byte non-zero means error). */
			status = readl(reply+16);
			if(status >> 24){
				status &= 0xffff;
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				/* Sanity check: a SCSI command should
				 * never complete through this path. */
				cmd = adpt_cmd_from_context(pHba,
							readl(reply+12));
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else {
			/* Normal SCSI command completion. */
			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				if(cmd->serial_number != 0) {
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		/* Hand the reply frame back to the controller. */
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
2226
/*
 * Build an I2O SCSI_EXEC message from a scsi_cmnd and post it to the
 * adapter.  Maps the command's scatter/gather list (64-bit SG elements
 * when the adapter supports dma64) and encodes the data direction into
 * both the message flags and each SG element.
 *
 * Returns 0 on success or the post error code.
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0 , sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;
	if(len) {
		/* Translate the midlayer data direction into the I2O
		 * SGL direction bit (0x04000000 = out) and the SCSI
		 * transfer direction field in msg[6]. */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir =0x40000000;	/* data in */
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	/* SGL out */
			scsidir =0x80000000;	/* data out */
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir =0x40000000;	/* treated as data in */
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	/* msg[0] (size/SGL offset) is filled in last. */
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	msg[3] = adpt_cmd_to_context(cmd);	/* get the scsi_cmnd back on completion */

	/* Private SCSI_EXEC command with the DPT organization id. */
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction | disconnect-ok, sense data, simple queue tag | CDB
	 * length. */
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	/* CDB always occupies a 16-byte block in the frame. */
	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;	/* total byte count — filled in after SG walk */
	if (dpt_dma64(pHba)) {
		reqlen = 16;	/* frame dwords with one 64-bit SGE */
		*mptr++ = (0x7C<<24)+(2<<16)+0x02;	/* enable 64-bit SG */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;	/* frame dwords with one 32-bit SGE */
	}

	/* Map and append the scatter/gather list. */
	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			/* 0x10000000 = simple SG element. */
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Rewrite the last element with the
			 * end-of-list/end-of-buffer bits (0xD0000000). */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		/* No data transfer. */
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Finish the header: size in dwords plus the SGL offset. */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
2347
2348
2349static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2350{
2351 struct Scsi_Host *host;
2352
2353 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2354 if (host == NULL) {
2355 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2356 return -1;
2357 }
2358 host->hostdata[0] = (unsigned long)pHba;
2359 pHba->host = host;
2360
2361 host->irq = pHba->pDev->irq;
2362
2363
2364
2365 host->io_port = 0;
2366 host->n_io_port = 0;
2367
2368 host->max_id = 16;
2369 host->max_lun = 256;
2370 host->max_channel = pHba->top_scsi_channel + 1;
2371 host->cmd_per_lun = 1;
2372 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2373 host->sg_tablesize = pHba->sg_tablesize;
2374 host->can_queue = pHba->post_fifo_size;
2375
2376 return 0;
2377}
2378
2379
/*
 * Translate an I2O reply frame into a SCSI midlayer result for the
 * completed command: decode the detailed status word, set the residual,
 * copy back sense data on CHECK CONDITION, and invoke scsi_done().
 *
 * Returns the value stored in cmd->result.
 */
static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00;	/* kept shifted up 8 bits */

	/* Reply word 4: high byte HBA status, low byte device status. */
	u16 detailed_status = readl(reply+16) &0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	/* Reply word 5 is the transfer count: compute the residual. */
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';	/* mark sense data invalid */

	if(!(reply_flags & MSG_FAIL)) {
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			/* Flag short transfers below the caller's
			 * declared underflow as errors. */
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		/* All remaining detailed statuses collapse to a
		 * generic error with a diagnostic printk. */
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
			       hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		/* Copy auto-sense data (at most 40 bytes are supplied
		 * in the reply frame starting at offset 28). */
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
			if(cmd->sense_buffer[0] == 0x70 &&
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* DATA PROTECT maps to a timeout so the
				 * midlayer retries rather than failing. */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);
			}
		}
	} else {
		/* Transport-level MSG_FAIL: report as a timeout so the
		 * command is retried. */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	/* Fold the device (SAM) status into the low byte. */
	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
	return cmd->result;
}
2500
2501
2502static s32 adpt_rescan(adpt_hba* pHba)
2503{
2504 s32 rcode;
2505 ulong flags = 0;
2506
2507 if(pHba->host)
2508 spin_lock_irqsave(pHba->host->host_lock, flags);
2509 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2510 goto out;
2511 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2512 goto out;
2513 rcode = 0;
2514out: if(pHba->host)
2515 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2516 return rcode;
2517}
2518
2519
/*
 * Reconcile the driver's device lists with a freshly fetched LCT after
 * a rescan: mark every known device UNSCANNED, walk the new LCT
 * creating entries for newly appeared devices and refreshing / bringing
 * back online the ones still present, then mark anything left
 * UNSCANNED as OFFLINE.
 *
 * Returns 0 on success, -1 if there is no LCT, -ENOMEM on allocation
 * failure.  Uses GFP_ATOMIC because it runs under the host lock.
 */
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10];
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* table_size is in dwords: 3-dword header, 9-dword entries. */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	/* Phase 1: presume every known device gone until the LCT walk
	 * proves otherwise. */
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		/* Only host-claimed entries are interesting here. */
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			/* Group 0x8000: the device's bus/id/lun. */
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* Search the LUN chain for this exact LUN. */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) {
				/* New device: build both the i2o_device
				 * and its adpt_device and link them in. */
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					pDev =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* Append to the LUN chain. */
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				/* buf[0]: low byte type, next byte flags. */
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;

				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			}

			/* Existing device: refresh state, bring it back
			 * online, and pick up a changed TID. */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) {
						/* TID changed: flag the
						 * sdev as changed media. */
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					/* Clears UNSCANNED too. */
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	/* Phase 3: anything still UNSCANNED vanished — take it offline. */
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}
2685
2686static void adpt_fail_posted_scbs(adpt_hba* pHba)
2687{
2688 struct scsi_cmnd* cmd = NULL;
2689 struct scsi_device* d = NULL;
2690
2691 shost_for_each_device(d, pHba->host) {
2692 unsigned long flags;
2693 spin_lock_irqsave(&d->list_lock, flags);
2694 list_for_each_entry(cmd, &d->cmd_list, list) {
2695 if(cmd->serial_number == 0){
2696 continue;
2697 }
2698 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2699 cmd->scsi_done(cmd);
2700 }
2701 spin_unlock_irqrestore(&d->list_lock, flags);
2702 }
2703}
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
/*
 * Bring the IOP into a state from which it can be (re)initialized:
 * reset it if necessary, then set up the outbound reply queue and
 * read the hardware resource table.
 *
 * Returns 0 on success, or a non-zero reset error / -1 on failure.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		/* Re-activation: probe the current adapter state first */
		if (adpt_i2o_status_get(pHba) < 0) {
			/* No status reply — force a reset and probe again */
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		/* Any already-running (or failed) state: reset to a known
		 * baseline, then require the adapter to report RESET. */
		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		/* First-time activation: always start from a clean reset */
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	/* Set up the reply FIFO and frame pool */
	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* Read the hardware resource table */
	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}
2767
2768
2769
2770
2771
2772static int adpt_i2o_online_hba(adpt_hba* pHba)
2773{
2774 if (adpt_i2o_systab_send(pHba) < 0) {
2775 adpt_i2o_delete_hba(pHba);
2776 return -1;
2777 }
2778
2779
2780 if (adpt_i2o_enable_hba(pHba) < 0) {
2781 adpt_i2o_delete_hba(pHba);
2782 return -1;
2783 }
2784
2785
2786 return 0;
2787}
2788
2789static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2790{
2791 u32 __iomem *msg;
2792 ulong timeout = jiffies + 5*HZ;
2793
2794 while(m == EMPTY_QUEUE){
2795 rmb();
2796 m = readl(pHba->post_port);
2797 if(m != EMPTY_QUEUE){
2798 break;
2799 }
2800 if(time_after(jiffies,timeout)){
2801 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2802 return 2;
2803 }
2804 schedule_timeout_uninterruptible(1);
2805 }
2806 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2807 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2808 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2809 writel( 0,&msg[2]);
2810 wmb();
2811
2812 writel(m, pHba->post_port);
2813 wmb();
2814 return 0;
2815}
2816
/*
 * Initialize the IOP's outbound (reply) queue: send ExecOutboundInit,
 * poll a small DMA status buffer until the IOP reports completion,
 * then allocate the reply frame pool and post every frame's bus
 * address to the reply FIFO.
 *
 * Returns 0 on success or a negative error code.
 */
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	/* Grab a free inbound message frame from the post FIFO */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* 4-byte DMA buffer the IOP writes its init status into */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		/* Return the claimed frame so it is not leaked */
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	/* Outbound msg frame size in words and Initcode */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);
	writel(0xD0000004, &msg[6]);	/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);	/* Status buffer bus address */

	writel(m, pHba->post_port);
	wmb();

	/* Poll the status byte until the IOP reports a final state
	 * (0x01 appears to mean "in progress" — keep waiting on it) */
	do {
		if (*status) {
			if (*status != 0x01 ) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* We lose the 4 bytes of "status" here, but we
			 * cannot free them: the IOP may still wake up
			 * later and DMA into this buffer, corrupting
			 * the memory if it had been reused.
			 */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	/* Anything but 0x04 (complete) means the init failed */
	if(*status != 0x04 ) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	/* Replace any previous reply pool with a freshly sized one */
	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

	/* Post every reply frame's bus address to the reply FIFO */
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
/*
 * Fetch the IOP status block into pHba->status_block (allocated on
 * first use), then derive the post/reply FIFO sizes and the
 * scatter-gather table limit from it.
 *
 * Returns 0 on success or a negative error code.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n",
				pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	/* Wait for a free inbound message frame */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);

	
	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* ExecStatusGet: the reply is DMA'd straight into status_block */
	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel( dma_low(pHba->status_block_pa), &msg[6]);
	writel( dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); /* 88 bytes */

	/* Post the message */
	writel(m, pHba->post_port);
	wmb();

	/* Poll the last byte of the block: it becomes 0xff once the
	 * IOP has finished writing the reply */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	/* Set up our number of inbound and outbound message frames */
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	/* Calculate the Scatter Gather list size (64-bit SG elements
	 * take an extra u32 per entry and a larger fixed header) */
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
			    - 14 * sizeof(u32))
			   / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
			    - 12 * sizeof(u32))
			   / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
		case 0x01:
			printk("INIT\n");
			break;
		case 0x02:
			printk("RESET\n");
			break;
		case 0x04:
			printk("HOLD\n");
			break;
		case 0x05:
			printk("READY\n");
			break;
		case 0x08:
			printk("OPERATIONAL\n");
			break;
		case 0x10:
			printk("FAILED\n");
			break;
		case 0x11:
			printk("FAULTED\n");
			break;
		default:
			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
3046
3047
3048
3049
3050static int adpt_i2o_lct_get(adpt_hba* pHba)
3051{
3052 u32 msg[8];
3053 int ret;
3054 u32 buf[16];
3055
3056 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3057 pHba->lct_size = pHba->status_block->expected_lct_size;
3058 }
3059 do {
3060 if (pHba->lct == NULL) {
3061 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3062 pHba->lct_size, &pHba->lct_pa,
3063 GFP_ATOMIC);
3064 if(pHba->lct == NULL) {
3065 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3066 pHba->name);
3067 return -ENOMEM;
3068 }
3069 }
3070 memset(pHba->lct, 0, pHba->lct_size);
3071
3072 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3073 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3074 msg[2] = 0;
3075 msg[3] = 0;
3076 msg[4] = 0xFFFFFFFF;
3077 msg[5] = 0x00000000;
3078 msg[6] = 0xD0000000|pHba->lct_size;
3079 msg[7] = (u32)pHba->lct_pa;
3080
3081 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3082 printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
3083 pHba->name, ret);
3084 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3085 return ret;
3086 }
3087
3088 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3089 pHba->lct_size = pHba->lct->table_size << 2;
3090 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3091 pHba->lct, pHba->lct_pa);
3092 pHba->lct = NULL;
3093 }
3094 } while (pHba->lct == NULL);
3095
3096 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3097
3098
3099
3100 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3101 pHba->FwDebugBufferSize = buf[1];
3102 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3103 pHba->FwDebugBufferSize);
3104 if (pHba->FwDebugBuffer_P) {
3105 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3106 FW_DEBUG_FLAGS_OFFSET;
3107 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3108 FW_DEBUG_BLED_OFFSET;
3109 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3110 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3111 FW_DEBUG_STR_LENGTH_OFFSET;
3112 pHba->FwDebugBuffer_P += buf[2];
3113 pHba->FwDebugFlags = 0;
3114 }
3115 }
3116
3117 return 0;
3118}
3119
/*
 * (Re)build the global I2O system table describing every registered
 * controller; it is later pushed to each IOP by adpt_i2o_systab_send().
 *
 * NOTE(review): dereferences hba_chain unconditionally — callers
 * appear to guarantee at least one HBA exists; confirm before reuse.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	/* Discard any table built on a previous pass (freed with the
	 * length it was allocated with, before sys_tbl_len changes) */
	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	/* header */
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		/* Skip adapters that no longer respond; shrink the count */
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue;
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1;
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		/* Inbound message port lives at base + 0x40 */
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}
3181
3182
3183
3184
3185
3186
3187static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3188{
3189 char buf[64];
3190 int unit = d->lct_data.tid;
3191
3192 printk(KERN_INFO "TID %3.3d ", unit);
3193
3194 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3195 {
3196 buf[16]=0;
3197 printk(" Vendor: %-12.12s", buf);
3198 }
3199 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3200 {
3201 buf[16]=0;
3202 printk(" Device: %-12.12s", buf);
3203 }
3204 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3205 {
3206 buf[8]=0;
3207 printk(" Rev: %-12.12s\n", buf);
3208 }
3209#ifdef DEBUG
3210 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3211 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3212 printk(KERN_INFO "\tFlags: ");
3213
3214 if(d->lct_data.device_flags&(1<<0))
3215 printk("C");
3216 if(d->lct_data.device_flags&(1<<1))
3217 printk("U");
3218 if(!(d->lct_data.device_flags&(1<<4)))
3219 printk("P");
3220 if(!(d->lct_data.device_flags&(1<<5)))
3221 printk("M");
3222 printk("\n");
3223#endif
3224}
3225
3226#ifdef DEBUG
3227
3228
3229
3230static const char *adpt_i2o_get_class_name(int class)
3231{
3232 int idx = 16;
3233 static char *i2o_class_name[] = {
3234 "Executive",
3235 "Device Driver Module",
3236 "Block Device",
3237 "Tape Device",
3238 "LAN Interface",
3239 "WAN Interface",
3240 "Fibre Channel Port",
3241 "Fibre Channel Device",
3242 "SCSI Device",
3243 "ATE Port",
3244 "ATE Device",
3245 "Floppy Controller",
3246 "Floppy Device",
3247 "Secondary Bus Port",
3248 "Peer Transport Agent",
3249 "Peer Transport",
3250 "Unknown"
3251 };
3252
3253 switch(class&0xFFF) {
3254 case I2O_CLASS_EXECUTIVE:
3255 idx = 0; break;
3256 case I2O_CLASS_DDM:
3257 idx = 1; break;
3258 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3259 idx = 2; break;
3260 case I2O_CLASS_SEQUENTIAL_STORAGE:
3261 idx = 3; break;
3262 case I2O_CLASS_LAN:
3263 idx = 4; break;
3264 case I2O_CLASS_WAN:
3265 idx = 5; break;
3266 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3267 idx = 6; break;
3268 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3269 idx = 7; break;
3270 case I2O_CLASS_SCSI_PERIPHERAL:
3271 idx = 8; break;
3272 case I2O_CLASS_ATE_PORT:
3273 idx = 9; break;
3274 case I2O_CLASS_ATE_PERIPHERAL:
3275 idx = 10; break;
3276 case I2O_CLASS_FLOPPY_CONTROLLER:
3277 idx = 11; break;
3278 case I2O_CLASS_FLOPPY_DEVICE:
3279 idx = 12; break;
3280 case I2O_CLASS_BUS_ADAPTER_PORT:
3281 idx = 13; break;
3282 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3283 idx = 14; break;
3284 case I2O_CLASS_PEER_TRANSPORT:
3285 idx = 15; break;
3286 }
3287 return i2o_class_name[idx];
3288}
3289#endif
3290
3291
/*
 * Read the Hardware Resource Table (HRT) from the IOP.  If the
 * returned header reports a table larger than our buffer, reallocate
 * at the reported size and retry.
 *
 * Returns 0 on success or a negative error code.
 */
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);	/* Simple transaction */
		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		/* Table bigger than our buffer: grow and retry.  Note the
		 * free uses the OLD size, matching the allocation. */
		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}
3329
3330
3331
3332
3333static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3334 int group, int field, void *buf, int buflen)
3335{
3336 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3337 u8 *opblk_va;
3338 dma_addr_t opblk_pa;
3339 u8 *resblk_va;
3340 dma_addr_t resblk_pa;
3341
3342 int size;
3343
3344
3345 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3346 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3347 if (resblk_va == NULL) {
3348 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3349 return -ENOMEM;
3350 }
3351
3352 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3353 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3354 if (opblk_va == NULL) {
3355 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3356 resblk_va, resblk_pa);
3357 printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
3358 pHba->name);
3359 return -ENOMEM;
3360 }
3361 if (field == -1)
3362 opblk[4] = -1;
3363
3364 memcpy(opblk_va, opblk, sizeof(opblk));
3365 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3366 opblk_va, opblk_pa, sizeof(opblk),
3367 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3368 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3369 if (size == -ETIME) {
3370 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3371 resblk_va, resblk_pa);
3372 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3373 return -ETIME;
3374 } else if (size == -EINTR) {
3375 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3376 resblk_va, resblk_pa);
3377 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3378 return -EINTR;
3379 }
3380
3381 memcpy(buf, resblk_va+8, buflen);
3382
3383 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3384 resblk_va, resblk_pa);
3385 if (size < 0)
3386 return size;
3387
3388 return buflen;
3389}
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
/*
 * Post a UtilParamsGet/Set message (@cmd) to device @tid and wait for
 * the reply.  The operation block at @opblk_pa is sent; the result is
 * DMA'd to @resblk_pa/@resblk_va.
 *
 * Returns the number of result-block bytes used (4 + payload length
 * decoded from the reply), or a negative error code — either the
 * post_wait failure, or the negated BlockStatus reported in res[1].
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		void *opblk_va,  dma_addr_t opblk_pa, int oplen,
	void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;
	}

	/* Bits 16-23 of res[1] carry the BlockStatus; non-zero = error */
	if (res[1]&0x00FF0000) {
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	/* Low 16 bits of res[1] = payload length in 32-bit words */
	return 4 + ((res[1] & 0x0000FFFF) << 2);
}
3435
3436
3437static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3438{
3439 u32 msg[4];
3440 int ret;
3441
3442 adpt_i2o_status_get(pHba);
3443
3444
3445
3446 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3447 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3448 return 0;
3449 }
3450
3451 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3452 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3453 msg[2] = 0;
3454 msg[3] = 0;
3455
3456 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3457 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3458 pHba->unit, -ret);
3459 } else {
3460 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3461 }
3462
3463 adpt_i2o_status_get(pHba);
3464 return ret;
3465}
3466
3467
3468
3469
3470
3471static int adpt_i2o_enable_hba(adpt_hba* pHba)
3472{
3473 u32 msg[4];
3474 int ret;
3475
3476 adpt_i2o_status_get(pHba);
3477 if(!pHba->status_block){
3478 return -ENOMEM;
3479 }
3480
3481 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3482 return 0;
3483
3484 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3485 return -EINVAL;
3486
3487 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3488 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3489 msg[2]= 0;
3490 msg[3]= 0;
3491
3492 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3493 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3494 pHba->name, ret);
3495 } else {
3496 PDEBUG("%s: Enabled.\n", pHba->name);
3497 }
3498
3499 adpt_i2o_status_get(pHba);
3500 return ret;
3501}
3502
3503
3504static int adpt_i2o_systab_send(adpt_hba* pHba)
3505{
3506 u32 msg[12];
3507 int ret;
3508
3509 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3510 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3511 msg[2] = 0;
3512 msg[3] = 0;
3513 msg[4] = (0<<16) | ((pHba->unit+2) << 12);
3514 msg[5] = 0;
3515
3516
3517
3518
3519
3520
3521 msg[6] = 0x54000000 | sys_tbl_len;
3522 msg[7] = (u32)sys_tbl_pa;
3523 msg[8] = 0x54000000 | 0;
3524 msg[9] = 0;
3525 msg[10] = 0xD4000000 | 0;
3526 msg[11] = 0;
3527
3528 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3529 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3530 pHba->name, ret);
3531 }
3532#ifdef DEBUG
3533 else {
3534 PINFO("%s: SysTab set.\n", pHba->name);
3535 }
3536#endif
3537
3538 return ret;
3539 }
3540
3541
3542
3543
3544
3545
3546
3547
3548#ifdef UARTDELAY
3549
/*
 * Busy-wait for roughly @millisec milliseconds, one udelay(1000) per
 * iteration.  Compiled only when UARTDELAY is defined.
 *
 * Fix: the declaration read "static static void", a duplicate
 * storage-class specifier that is invalid C.
 */
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}
3557
3558#endif
3559
/*
 * SCSI midlayer host template for the driver.  All adpt_* entry
 * points are implemented earlier in this file; can_queue is capped at
 * MAX_TO_IOP_MESSAGES, the same bound applied to the post FIFO in
 * adpt_i2o_status_get().
 */
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.show_info		= adpt_show_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};
3578
3579static int __init adpt_init(void)
3580{
3581 int error;
3582 adpt_hba *pHba, *next;
3583
3584 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3585
3586 error = adpt_detect(&driver_template);
3587 if (error < 0)
3588 return error;
3589 if (hba_chain == NULL)
3590 return -ENODEV;
3591
3592 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3593 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3594 if (error)
3595 goto fail;
3596 scsi_scan_host(pHba->host);
3597 }
3598 return 0;
3599fail:
3600 for (pHba = hba_chain; pHba; pHba = next) {
3601 next = pHba->next;
3602 scsi_remove_host(pHba->host);
3603 }
3604 return error;
3605}
3606
3607static void __exit adpt_exit(void)
3608{
3609 adpt_hba *pHba, *next;
3610
3611 for (pHba = hba_chain; pHba; pHba = pHba->next)
3612 scsi_remove_host(pHba->host);
3613 for (pHba = hba_chain; pHba; pHba = next) {
3614 next = pHba->next;
3615 adpt_release(pHba->host);
3616 }
3617}
3618
3619module_init(adpt_init);
3620module_exit(adpt_exit);
3621
3622MODULE_LICENSE("GPL");
3623