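/*
 * Adaptec I2O RAID driver (dpt_i2o / dpti): drives DPT/Adaptec I2O RAID
 * controllers through the Linux SCSI midlayer and exposes a character
 * device management interface on DPTI_I2O_MAJOR.
 */
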
#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

#include <linux/ioctl.h>
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"
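
/*
 * Global ioctl serialization mutex, followed by the driver signature
 * reported to DPT management tools (the processor family/type fields are
 * selected at compile time from the build architecture).
 */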
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1), (-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};
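
/*
 * Bookkeeping for synchronous ("post and wait") I2O messages.  Each waiter
 * is linked into adpt_post_wait_queue under adpt_post_wait_lock and is
 * identified by a 15-bit id that the ISR matches against the reply's
 * transaction context.
 */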
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
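
/*
 * DMA helpers: dpt_dma64() tells whether this HBA was configured for
 * 64-bit DMA; dma_high()/dma_low() split a dma_addr_t into the 32-bit
 * halves used in I2O scatter-gather elements.
 */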
static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

static u8 adpt_read_blink_led(adpt_hba *host)
{
	if (host->FwDebugBLEDflag_P) {
		if (readb(host->FwDebugBLEDflag_P) == 0xbc)
			return readb(host->FwDebugBLEDvalue_P);
	}
	return 0;
}

static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, dptids);

static int adpt_detect(struct scsi_host_template *sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device(PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if (pDev->device == PCI_DPT_DEVICE_ID ||
		    pDev->device == PCI_DPT_RAPTOR_DEVICE_ID) {
			if (adpt_install_hba(sht, pDev)) {
				PERROR("Could not initialize an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count - 1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		/* Activate does get status, reset, and outbound init */
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}

	/* Active IOPs are now in HOLD state */
rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If building the system table fails, kill everything and bail,
	 * as we cannot initialize the IOPs without a system table.
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBAs in HOLD state\n");

	/* If an IOP fails to come online, rebuild the system table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBAs in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO "%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING "dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING "dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	/* Register the control device node; without it the management
	 * interface is useless, so shut everything down on failure. */
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}

static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba *pHba = (adpt_hba *)host->hostdata[0];

	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}

static void adpt_inquiry(adpt_hba *pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8 *buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "%s: Could not allocate buffer\n", pHba->name);
		return;
	}
	memset((void *)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	/* DATA IN (iop<--dev) */

	if (dpt_dma64(pHba))
		reqlen = 17;	/* SINGLE SGE, 64 bit */
	else
		reqlen = 14;	/* SINGLE SGE, 32 bit */
	/* Stick the headers on */
	msg[0] = reqlen << 16 | SGL_OFFSET_12;
	msg[1] = (0xff << 24 | HOST_TID << 12 | ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	/* Adaptec/DPT Private stuff */
	msg[4] = I2O_CMD_SCSI_EXEC | DPT_ORGANIZATION_ID << 16;
	msg[5] = ADAPTER_TID | 1 << 16 /* Interpret bit */;
	/*
	 * Command frame: SCSI flags (direction), interpret/queue bits and
	 * the CDB length (6 bytes for INQUIRY).
	 */
	msg[6] = scsidir | 0x20a00000 | 6;

	mptr = msg + 7;

	memset(scb, 0, sizeof(scb));
	/* Write SCSI command into the message - always 16 byte block */
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* Standard INQUIRY data is 36 bytes */
	scb[5] = 0;
	/* Don't care about the rest of scb */

	memcpy(mptr, scb, sizeof(scb));
	mptr += 4;
	lenptr = mptr++;	/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C << 24) + (2 << 16) + 0x02;	/* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000 | direction | len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000 | direction | len;
		*mptr++ = addr;
	}

	/* Send it on its way, allowing up to two minutes for a reply */
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen << 2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n", pHba->name, rcode);
		/* On -ETIME/-EINTR the IOP may still write to the buffer,
		 * so it cannot safely be freed */
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8 *)&buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8 *)&buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return;
}

static int adpt_slave_configure(struct scsi_device *device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba *pHba;

	pHba = (adpt_hba *)host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	adpt_hba *pHba = NULL;
	struct adpt_device *pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands are executed automatically by the host
	 * adapter for any errors, so they should not be executed explicitly
	 * unless the sense data is zero, indicating that no error occurred.
	 */
	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* TODO: if the device is offline we may need a bus rescan followed
	 * by a get_lct to see whether the device is still there */
	if ((pDev = (struct adpt_device *)(cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			/* No such device on this adapter */
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called while the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads = -1;
	int sectors = -1;
	int cylinders = -1;

	/* Pick a default geometry based on the capacity */
	if (capacity < 0x2000) {	/* < 4MB */
		heads = 18;
		sectors = 2;
	}
	else if (capacity < 0x20000) {	/* < 64MB */
		heads = 64;
		sectors = 32;
	}
	else if (capacity < 0x40000) {	/* < 128MB */
		heads = 65;
		sectors = 63;
	}
	else if (capacity < 0x80000) {	/* < 256MB */
		heads = 128;
		sectors = 63;
	}
	else {				/* >= 256MB */
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	/* Do a CD-ROM (SCSI device type 5) */
	if (sdev->type == 5) {
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}

static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba *pHba;

	pHba = (adpt_hba *)host->hostdata[0];
	return (char *)(pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device *d;
	int id;
	int chan;
	adpt_hba *pHba;
	int unit;

	/* Find the HBA (host bus adapter) we are looking for */
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int)pHba->reply_fifo_size, host->sg_tablesize);

	seq_printf(m, "Devices:\n");
	for (chan = 0; chan < MAX_CHANNEL; chan++) {
		for (id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while (d) {
				seq_printf(m, "\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m, " Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					scsi_device_online(d->pScsi_dev) ? "online" : "offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}
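
/*
 * A command's serial number doubles as its 32-bit I2O transaction context,
 * letting the reply handler map a reply frame back to the originating
 * scsi_cmnd.
 */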
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

/*
 * Go from a u32 'context' back to a struct scsi_cmnd *.  This walks the
 * command list of every device on the host, so the host lock is dropped
 * (and retaken) around the search.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba *pHba, u32 context)
{
	struct scsi_cmnd *cmd;
	struct scsi_device *d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}
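
/*
 * Turn a pointer to ioctl reply data into a u32 'context'.  On 32-bit
 * kernels the pointer value itself is used; on 64-bit kernels an index
 * into the per-HBA ioctl_reply_context[] table is handed out instead,
 * since the pointer no longer fits in 32 bits.
 */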
static u32 adpt_ioctl_to_context(adpt_hba *pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree(reply);
		printk(KERN_WARNING "%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba = NULL;	/* host bus adapter structure */
	struct adpt_device *dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if (cmd->serial_number == 0) {
		return FAILED;
	}
	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	printk(KERN_INFO "%s: Trying to Abort\n", pHba->name);
	if ((dptdevice = (void *)(cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n", pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if (rcode == -EOPNOTSUPP) {
			printk(KERN_INFO "%s: Abort cmd not supported\n", pHba->name);
			return FAILED;
		}
		printk(KERN_INFO "%s: Abort failed.\n", pHba->name);
		return FAILED;
	}
	printk(KERN_INFO "%s: Abort complete.\n", pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
/* I2O command code used by adpt_device_reset() below */

static int adpt_device_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device *d = cmd->device->hostdata;

	pHba = (void *)cmd->device->host->hostdata[0];
	printk(KERN_INFO "%s: Trying to reset device\n", pHba->name);
	if (!d) {
		printk(KERN_INFO "%s: Reset Device: Device Not found\n", pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET << 24 | HOST_TID << 12 | d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if (rcode == -EOPNOTSUPP) {
			printk(KERN_INFO "%s: Device reset not supported\n", pHba->name);
			return FAILED;
		}
		printk(KERN_INFO "%s: Device reset failed\n", pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO "%s: Device reset successful\n", pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
/* This version of bus reset is called by the eh_error handler */
static int adpt_bus_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING "%s: Bus reset: SCSI Bus %d: tid: %d\n",
			pHba->name, cmd->device->channel,
			pHba->channel[cmd->device->channel].tid);
	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET << 24 | HOST_TID << 12 |
			pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING "%s: Bus reset failed.\n", pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING "%s: Bus reset success.\n", pHba->name);
		return SUCCESS;
	}
}

/* This version of reset is called by the eh_error_handler */
static int __adpt_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	int rcode;

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	printk(KERN_WARNING "%s: Hba Reset: scsi id %d: tid: %d\n",
			pHba->name, cmd->device->channel,
			pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if (rcode == 0) {
		printk(KERN_WARNING "%s: HBA reset complete\n", pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING "%s: HBA reset failed (%x)\n", pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/* This function is called by the ioctl and error handlers: it resets and
 * re-initializes the HBA, then fails all outstanding commands back to the
 * midlayer. */
static int adpt_hba_reset(adpt_hba *pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	/* Activate does get status, reset, and outbound init */
	if ((rcode = adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n", pHba->name);

	if ((rcode = adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n", pHba->name);

	if ((rcode = adpt_i2o_lct_get(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;
}

/*===========================================================================
 * Shutdown
 *===========================================================================
 */

static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");

	/* Delete all IOPs from the controller chain; they clean up their
	 * own internal memory as they are removed */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Nothing should be outstanding at this point, so just free any
	 * stale entries left on the post-wait queue */
	for (p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}

	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template *sht, struct pci_dev *pDev)
{
	adpt_hba *pHba = NULL;
	adpt_hba *p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if (pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 * See if we should enable 64-bit DMA mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev, 0);
	hba_map0_area_size = pci_resource_len(pDev, 0);

	/* Check if standard PCI card or single BAR Raptor */
	if (pDev->device == PCI_DPT_DEVICE_ID) {
		if (pDev->subsystem_device >= 0xc032 && pDev->subsystem_device <= 0xc03b) {
			/* Raptor card with this device id needs 4M */
			hba_map0_area_size = 0x400000;
		} else {	/* Not Raptor - it is a PCI card */
			if (hba_map0_area_size > 0x100000) {
				hba_map0_area_size = 0x100000;
			}
		}
	} else {	/* Raptor split BAR config */
		/* Use BAR1 in this configuration */
		base_addr1_phys = pci_resource_start(pDev, 1);
		hba_map1_area_size = pci_resource_len(pDev, 1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 * On 64-bit systems we only map as much of the BAR as we actually
	 * need; some HBAs report very large regions and mapping them whole
	 * wastes vmalloc space.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys, hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if (raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size);
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	/* Allocate and zero the data structure */
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if (hba_chain != NULL) {
		for (p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	/* Set up the virtual base address of the I2O device */
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt + 0x30;
	pHba->post_port = base_addr_virt + 0x40;
	pHba->reply_port = base_addr_virt + 0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	/* Initializing the spinlocks */
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if (raptorFlag == 0) {
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count - 1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO "Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count - 1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO "     BAR0 %p - size= %x\n", base_addr_virt, hba_map0_area_size);
		printk(KERN_INFO "     BAR1 %p - size= %x\n", msg_addr_virt, hba_map1_area_size);
	}

	if (request_irq(pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR "%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}

static void adpt_i2o_delete_hba(adpt_hba *pHba)
{
	adpt_hba *p1;
	adpt_hba *p2;
	struct i2o_device *d;
	struct i2o_device *next;
	int i;
	int j;
	struct adpt_device *pDev;
	struct adpt_device *pNext;

	mutex_lock(&adpt_configuration_lock);

	if (pHba->host) {
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for (p1 = hba_chain; p1; p2 = p1, p1 = p1->next) {
		if (p1 == pHba) {
			if (p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if (pHba->msg_addr_virt != pHba->base_addr_virt) {
		iounmap(pHba->msg_addr_virt);
	}
	if (pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if (pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if (pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if (pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if (pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for (d = pHba->devices; d; d = next) {
		next = d->next;
		kfree(d);
	}
	for (i = 0; i < pHba->top_scsi_channel; i++) {
		for (j = 0; j < MAX_ID; j++) {
			if (pHba->channel[i].device[j] != NULL) {
				for (pDev = pHba->channel[i].device[j]; pDev; pDev = pNext) {
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
			       MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if (hba_count <= 0) {
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device *adpt_find_device(adpt_hba *pHba, u32 chan, u32 id, u32 lun)
{
	struct adpt_device *d;

	if (chan >= MAX_CHANNEL)	/* chan is unsigned, no < 0 check needed */
		return NULL;

	if (pHba->channel[chan].device == NULL) {
		printk(KERN_DEBUG "Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if (!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match */
	if (d->scsi_lun == lun) {
		return d;
	}

	/* else we need to look through all the luns */
	for (d = d->next_lun; d; d = d->next_lun) {
		if (d->scsi_lun == lun) {
			return d;
		}
	}
	return NULL;
}
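
/*
 * Send an I2O message and sleep until the ISR completes it via
 * adpt_i2o_post_wait_complete() with a matching 15-bit id, or until the
 * timeout expires.  The host lock, if held, is dropped while sleeping.
 */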
static int adpt_i2o_post_wait(adpt_hba *pHba, u32 *msg, int len, int timeout)
{
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* TODO: we need a MORE unique way of getting ids
	 * to support async LCT get
	 */
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if ((status = adpt_i2o_post_this(pHba, msg, len)) == 0) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else {
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				/*
				 * I/O was issued but no reply arrived in
				 * time; callers check for -ETIME before
				 * freeing buffers the IOP may still write to.
				 */
				status = -ETIME;
			}
		}
		if (pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if (status == -ETIMEDOUT) {
		printk(KERN_INFO "dpti%d: POST WAIT TIMEOUT\n", pHba->unit);
		/* wait_data is left on the queue and freed at shutdown */
		return status;
	}

	/* Remove the entry from the queue */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for (p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if (p1 == wait_data) {
			if (p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION) {
				status = -EOPNOTSUPP;
			}
			if (p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
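
/*
 * Low-level post: poll the post port for a free message-frame offset,
 * copy the message into adapter memory, then write the offset back to the
 * post port to hand the frame to the IOP.
 */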
static s32 adpt_i2o_post_this(adpt_hba *pHba, u32 *data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30 * HZ;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	/* Post message */
	writel(m, pHba->post_port);
	wmb();

	return 0;
}

static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile.
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */
	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for (p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if (p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);

	printk(KERN_DEBUG "dpti: Could Not find task %d in wait queue\n", context);
	printk(KERN_DEBUG "      Tasks in wait queue:\n");
	for (p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG "         %d\n", p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba *pHba)
{
	u32 msg[8];
	u8 *status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE;
	ulong timeout = jiffies + (TMOUT_IOPRESET * HZ);

	if (pHba->initialized == FALSE) {	/* First time reset should be quick */
		timeout = jiffies + (25 * HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR "IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status, 0, 4);

	msg[0] = EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0;
	msg[6] = dma_low(addr);
	msg[7] = dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt + m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while (*status == 0) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "%s: IOP Reset Timeout\n", pHba->name);
			/*
			 * We lose 4 bytes of "status" here, but we cannot
			 * free the buffer: the IOP may still write to it
			 * when it eventually completes the reset.
			 */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if (*status == 0x01 /* I2O_EXEC_IOP_RESET_IN_PROGRESS */) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		/* Wait for a free message frame, then flush it with a NOP */
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if (time_after(jiffies, timeout)) {
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n", pHba->name);
				/* Again we lose the 4 "status" bytes; the
				 * IOP may still write to them later. */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);

		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if (*status == 0x02 ||
	    pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING "%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	/* Let the card tell us what it is doing for a while */
	adpt_delay(20000);
#endif
	return 0;
}
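
/*
 * Parse the logical configuration table (LCT): track the highest
 * bus/ID/LUN seen, record bus-adapter ports per channel, and build
 * adpt_device entries for each block-storage, SCSI or FC peripheral the
 * IOP reports.
 */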
static int adpt_i2o_parse_lct(adpt_hba *pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10];	/* larger than 7, just to be safe */
	struct adpt_device *pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n", pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for (i = 0; i < max; i++) {
		if (lct->lct_entry[i].user_tid != 0xfff) {
			/*
			 * If we have hidden devices, we need to inform the
			 * upper layers about the possible maximum id
			 * reference to handle device access when an array is
			 * disassembled.  This code has no other purpose but
			 * to allow us future access to devices that are
			 * currently hidden behind arrays.
			 */
			if (lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {
				continue;
			}
			tid = lct->lct_entry[i].tid;
			/* I2O_DPT_DEVICE_INFO_GROUP_NO */
			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) < 0) {
				continue;
			}
			bus_no = buf[0] >> 16;
			scsi_id = buf[1];
			scsi_lun = (buf[2] >> 8) & 0xff;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong, skip it */
				printk(KERN_WARNING "%s: Channel number %d out of range\n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID) {
				printk(KERN_WARNING "%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
				continue;
			}
			if (bus_no > pHba->top_scsi_channel) {
				pHba->top_scsi_channel = bus_no;
			}
			if (scsi_id > pHba->top_scsi_id) {
				pHba->top_scsi_id = scsi_id;
			}
			if (scsi_lun > pHba->top_scsi_lun) {
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if (d == NULL) {
			printk(KERN_CRIT "%s: Out of memory for I2O device data.\n", pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for (d = pHba->devices; d; d = d->next) {
		if (d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		    d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT) {
			tid = d->lct_data.tid;
			/* TODO: get the bus_no from hrt pci_bus, pci_dev */
			if (bus_no > pHba->top_scsi_channel) {
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if (adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28) >= 0) {
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}

			bus_no++;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong, skip it */
				printk(KERN_WARNING "%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	/* Setup adpt_device table */
	for (d = pHba->devices; d; d = d->next) {
		if (d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {

			tid = d->lct_data.tid;
			scsi_id = -1;
			/* I2O_DPT_DEVICE_INFO_GROUP_NO */
			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) >= 0) {
				bus_no = buf[0] >> 16;
				scsi_id = buf[1];
				scsi_lun = (buf[2] >> 8) & 0xff;
				if (bus_no >= MAX_CHANNEL) {	/* Something wrong, skip it */
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if (pHba->channel[bus_no].device[scsi_id] == NULL) {
					pDev = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if (pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for (pDev = pHba->channel[bus_no].device[scsi_id];
					     pDev->next_lun; pDev = pDev->next_lun) {
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if (pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0]) & 0xff;
				pDev->flags = (buf[0] >> 8) & 0xff;
				if (scsi_id > pHba->top_scsi_id) {
					pHba->top_scsi_id = scsi_id;
				}
				if (scsi_lun > pHba->top_scsi_lun) {
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if (scsi_id == -1) {
				printk(KERN_WARNING "Could not find SCSI ID for %s\n",
					d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}

static int adpt_i2o_install_device(adpt_hba *pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller = pHba;
	d->owner = NULL;
	d->next = pHba->devices;
	d->prev = NULL;
	if (pHba->devices != NULL) {
		pHba->devices->prev = d;
	}
	pHba->devices = d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba *pHba;

	mutex_lock(&adpt_mutex);

	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba *pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}
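
/*
 * I2OUSRCMD passthrough: copy a user-supplied I2O message in, stage any
 * scatter-gather buffers in DMA-coherent memory, post the message, then
 * copy the reply frame and any data-in SG buffers back to user space.
 */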
static int adpt_i2o_passthru(adpt_hba *pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32 *reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user *user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE * 4);
	/* get user msg size in u32s */
	if (get_user(size, &user_msg[0])) {
		return -EFAULT;
	}
	size = size >> 16;

	user_reply = &user_msg[size];
	if (size > MAX_MESSAGE_SIZE) {
		return -EFAULT;
	}
	size *= 4;	/* Convert to bytes */

	/* Copy in the user's I2O command */
	if (copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size >> 16;
	if (reply_size > REPLY_FRAME_SIZE) {
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE * 4, GFP_KERNEL);
	if (reply == NULL) {
		printk(KERN_WARNING "%s: Could not allocate reply buffer\n", pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0] >> 4) & 0xf;	/* SG list offset, in u32s */
	msg[2] = 0x40000000;	/* IOCTL context */
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1)	/* reply was already freed on failure */
		return -EBUSY;

	memset(sg_list, 0, sizeof(sg_list[0]) * pHba->sg_tablesize);
	if (sg_offset) {
		/* TODO: 64bit fix */
		struct sg_simple_element *sg = (struct sg_simple_element *)(msg + sg_offset);
		sg_count = (size - sg_offset * 4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize) {
			printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n", pHba->name, sg_count);
			kfree(reply);
			return -EINVAL;
		}

		for (i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /* I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */)) {
				printk(KERN_DEBUG "%s:Bad SG element %d - not simple (%x)\n", pHba->name, i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if (!p) {
				printk(KERN_DEBUG "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					pHba->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p;	/* sglist indexed with input frame, not our internal frame */
			/* Copy in the user's SG buffer if necessary */
			if (sg[i].flag_count & 0x04000000 /* I2O_SGL_FLAGS_DIR */) {
				/* TODO: 64bit fix */
				if (copy_from_user(p, (void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG "%s: Could not copy SG buf %d FROM user\n", pHba->name, i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* TODO: 64bit fix */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from entering the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if (rcode) {
		goto cleanup;
	}

	if (sg_offset) {
		/* Copy back the Scatter Gather buffers to user space */
		u32 j;
		/* TODO: 64bit fix */
		struct sg_simple_element *sg;
		int sg_size;

		/* Re-copy in the user's I2O command */
		memset(&msg, 0, MAX_MESSAGE_SIZE * 4);

		if (get_user(size, &user_msg[0])) {
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size >> 16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}

		if (copy_from_user(msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset * 4) / sizeof(struct sg_simple_element);

		/* TODO: 64bit fix */
		sg = (struct sg_simple_element *)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out, if a data-in buffer */
			if (!(sg[j].flag_count & 0x4000000 /* I2O_SGL_FLAGS_DIR */)) {
				sg_size = sg[j].flag_count & 0xffffff;
				/* TODO: 64bit fix */
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus, sg_list[j], sg_size)) {
					printk(KERN_WARNING "%s: Could not copy %p TO user %x\n", pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		/* we wrote our own values for context - now restore the user supplied ones */
		if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
			printk(KERN_WARNING "%s: Could not copy message context FROM user\n", pHba->name);
			rcode = -EFAULT;
		}
		if (copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING "%s: Could not copy reply TO user\n", pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element *)(msg + sg_offset);
		kfree(reply);
		while (sg_index) {
			if (sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}
	return rcode;
}

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S *si)
{
	/* This is all the info we need for now;
	 * later we will add more fields as needed
	 */
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S *si)
{
	/* This is all the info we need for now;
	 * later we will add more fields as needed
	 */
	si->processorType = PROC_ULTRASPARC;
}
#endif

#if defined __alpha__
static void adpt_alpha_info(sysInfo_S *si)
{
	/* This is all the info we need for now;
	 * later we will add more fields as needed
	 */
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__
static void adpt_i386_info(sysInfo_S *si)
{
	/* This is all the info we need for now;
	 * later we will add more fields as needed
	 */
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:	/* Just in case */
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong, it doesn't matter.
 */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))) {
		printk(KERN_WARNING "dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}

static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba *pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	while ((volatile u32)pHba->state & DPTI_STATE_RESET)
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO: {
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong)pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum = PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if (copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))) {
			printk(KERN_WARNING "%s: Could not copy HbaInfo TO user\n", pHba->name);
			return -EFAULT;
		}
		break;
	}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED: {
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
	}
	case I2ORESETCMD:
		if (pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if (pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}

static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch (cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif

static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd *cmd;
	adpt_hba *pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status = 0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL) {
		printk(KERN_WARNING "adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if (pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while (readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if (m == EMPTY_QUEUE) {
			/* Try twice then give up */
			rmb();
			m = readl(pHba->reply_port);
			if (m == EMPTY_QUEUE) {
				/* This really should not happen */
				printk(KERN_ERR "dpti: Could not get reply frame\n");
				goto out;
			}
		}
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply + 28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n", pHba->name);
			if (old_m >= 0x100000) {
				printk(KERN_ERR "%s: Bad preserved MFA (%x)- dropping frame\n", pHba->name, old_m);
				writel(m, pHba->reply_port);
				continue;
			}
			/* Transaction context is 0 in a failed reply frame;
			 * recover it from the preserved original message */
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg + 12);
			writel(old_context, reply + 12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply + 8);
		if (context & 0x40000000) {	/* IOCTL */
			void *p = adpt_ioctl_from_context(pHba, readl(reply + 12));
			if (p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			/* All IOCTLs will also be post wait */
		}
		if (context & 0x80000000) {	/* Post wait message */
			status = readl(reply + 16);
			if (status >> 24) {
				status &= 0xffff;	/* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if (!(context & 0x40000000)) {
				cmd = adpt_cmd_from_context(pHba,
							readl(reply + 12));
				if (cmd != NULL) {
					printk(KERN_WARNING "%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else {	/* SCSI message */
			cmd = adpt_cmd_from_context(pHba, readl(reply + 12));
			if (cmd != NULL) {
				scsi_dma_unmap(cmd);
				if (cmd->serial_number != 0) {	/* If not timedout */
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if (pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
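
/*
 * Build and post the I2O SCSI_EXEC message for a midlayer command:
 * private DPT header, 16-byte CDB block, then a scatter-gather list in
 * 32- or 64-bit simple-element format depending on dpt_dma64().
 */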
static s32 adpt_scsi_to_i2o(adpt_hba *pHba, struct scsi_cmnd *cmd, struct adpt_device *d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32 *mptr;
	u32 *lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0, sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;	/* DATA NO XFER */
	if (len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 */
		switch (cmd->sc_data_direction) {
		case DMA_FROM_DEVICE:
			scsidir = 0x40000000;	/* DATA IN  (iop<--dev) */
			break;
		case DMA_TO_DEVICE:
			direction = 0x04000000;	/* SGL OUT */
			scsidir = 0x80000000;	/* DATA OUT (iop-->dev) */
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir = 0x40000000;	/* DATA IN  (iop<--dev) */
			/* Assume In - and continue */
			break;
		default:
			printk(KERN_WARNING "%s: scsi opcode 0x%x not supported.\n",
				pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK << 16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	/* msg[0] is set at the end, once reqlen is known */
	msg[1] = ((0xff << 24) | (HOST_TID << 12) | d->tid);
	msg[2] = 0;
	msg[3] = adpt_cmd_to_context(cmd);	/* Want SCSI control block back */
	/* Adaptec/DPT Private stuff */
	msg[4] = I2O_CMD_SCSI_EXEC | (DPT_ORGANIZATION_ID << 16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue, CDB length */
	msg[6] = scsidir | 0x20a00000 | cmd->cmd_len;

	mptr = msg + 7;

	/* Write SCSI command into the message - always 16 byte block */
	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr += 4;
	lenptr = mptr++;	/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		reqlen = 16;	/* SINGLE SGE */
		*mptr++ = (0x7C << 24) + (2 << 16) + 0x02;	/* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;	/* SINGLE SGE */
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction | 0x10000000 | sg_dma_len(sg);
			len += sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end-of-list element */
			if (i == nseg - 1)
				*lptr = direction | 0xD0000000 | sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if (cmd->underflow && len != cmd->underflow) {
			printk(KERN_WARNING "Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen << 16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	/* Send it on its way */
	rcode = adpt_i2o_post_this(pHba, msg, reqlen << 2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
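
/*
 * Allocate the midlayer Scsi_Host for this HBA and size its queues from
 * the limits negotiated with the IOP.
 */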
static s32 adpt_scsi_host_alloc(adpt_hba *pHba, struct scsi_host_template *sht)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(sht, sizeof(adpt_hba *));
	if (host == NULL) {
		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
		return -1;
	}
	host->hostdata[0] = (unsigned long)pHba;
	pHba->host = host;

	host->irq = pHba->pDev->irq;
	/* no IO ports, so don't have to set host->io_port and
	 * host->n_io_port
	 */
	host->io_port = 0;
	host->n_io_port = 0;
	/* see comments in scsi_host.h */
	host->max_id = 16;
	host->max_lun = 256;
	host->max_channel = pHba->top_scsi_channel + 1;
	host->cmd_per_lun = 1;
	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
	host->sg_tablesize = pHba->sg_tablesize;
	host->can_queue = pHba->post_fifo_size;
	host->use_cmd_list = 1;

	return 0;
}

static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00;	/* Leave it shifted up 8 bits */
	/*
	 * I know this would look cleaner if I just read bytes,
	 * but the model I have been using for all the rest of the
	 * io is in 4 byte words - so I keep that model
	 */
	u16 detailed_status = readl(reply + 16) & 0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	/* calculate resid for sg */
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply + 20));

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';	/* initialize sense valid flag to false */

	if (!(reply_flags & MSG_FAIL)) {
		switch (detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			/* handle underflow */
			if (readl(reply + 20) < cmd->underflow) {
				cmd->result = (DID_ERROR << 16);
				printk(KERN_WARNING "%s: SCSI CMD underflow\n", pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING "%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING "%s: SCSI CMD parity error\n", pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING "%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
				hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		/*
		 * copy over the request sense data if it was a check
		 * condition status
		 */
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
			/* Copy over the sense data */
			memcpy_fromio(cmd->sense_buffer, (reply + 28), len);
			if (cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
			    cmd->sense_buffer[2] == DATA_PROTECT) {
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING "%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);
			}
		}
	} else {
		/*
		 * In this condition we could not talk to the tid.
		 * The card rejected it; we should signal a retry
		 * for a limited number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING "%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
			((struct adpt_device *)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	cmd->result |= (dev_status);

	if (cmd->scsi_done != NULL) {
		cmd->scsi_done(cmd);
	}
	return cmd->result;
}
2492
2493
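
/*
 * Re-read and re-parse the logical configuration table while holding
 * the host lock, picking up devices the firmware added or removed.
 */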
static s32 adpt_rescan(adpt_hba* pHba)
{
    s32 rcode;
    ulong flags = 0;

    if (pHba->host)
        spin_lock_irqsave(pHba->host->host_lock, flags);
    if ((rcode = adpt_i2o_lct_get(pHba)) < 0)
        goto out;
    if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0)
        goto out;
    rcode = 0;
out:
    if (pHba->host)
        spin_unlock_irqrestore(pHba->host->host_lock, flags);
    return rcode;
}
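
/*
 * Walk the freshly read LCT: mark every known device unscanned, add
 * entries for devices that appeared, bring re-discovered devices back
 * online, and offline anything no longer listed.
 */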
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
    int i;
    int max;
    int tid;
    struct i2o_device *d;
    i2o_lct *lct = pHba->lct;
    u8 bus_no = 0;
    s16 scsi_id;
    s16 scsi_lun;
    u32 buf[10];
    struct adpt_device* pDev = NULL;
    struct i2o_device* pI2o_dev = NULL;

    if (lct == NULL) {
        printk(KERN_ERR "%s: LCT is empty???\n", pHba->name);
        return -1;
    }

    /* table_size is in dwords: subtract the 3-dword header, then
     * divide by the 9-dword entry size to get the entry count.
     */
    max = lct->table_size;
    max -= 3;
    max /= 9;

    /* Mark each device as unscanned so removals can be spotted below */
    for (d = pHba->devices; d; d = d->next) {
        pDev = (struct adpt_device*) d->owner;
        if (!pDev) {
            continue;
        }
        pDev->state |= DPTI_DEV_UNSCANNED;
    }

    printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name, max);

    for (i = 0; i < max; i++) {
        if (lct->lct_entry[i].user_tid != 0xfff) {
            continue;
        }

        if (lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
            lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
            lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {
            tid = lct->lct_entry[i].tid;
            if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) < 0) {
                printk(KERN_ERR "%s: Could not query device\n", pHba->name);
                continue;
            }
            bus_no = buf[0] >> 16;
            if (bus_no >= MAX_CHANNEL) {
                printk(KERN_WARNING
                    "%s: Channel number %d out of range\n",
                    pHba->name, bus_no);
                continue;
            }

            scsi_id = buf[1];
            scsi_lun = (buf[2] >> 8) & 0xff;
            pDev = pHba->channel[bus_no].device[scsi_id];
            /* Search the LUN chain for this device */
            while (pDev) {
                if (pDev->scsi_lun == scsi_lun) {
                    break;
                }
                pDev = pDev->next_lun;
            }
            if (!pDev) {
                /* A new device appeared: allocate and install it */
                d = kmalloc(sizeof(struct i2o_device),
                        GFP_ATOMIC);
                if (d == NULL) {
                    printk(KERN_CRIT "Out of memory for I2O device data.\n");
                    return -ENOMEM;
                }

                d->controller = pHba;
                d->next = NULL;

                memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

                d->flags = 0;
                adpt_i2o_report_hba_unit(pHba, d);
                adpt_i2o_install_device(pHba, d);

                pDev = pHba->channel[bus_no].device[scsi_id];
                if (pDev == NULL) {
                    pDev =
                        kzalloc(sizeof(struct adpt_device),
                            GFP_ATOMIC);
                    if (pDev == NULL) {
                        return -ENOMEM;
                    }
                    pHba->channel[bus_no].device[scsi_id] = pDev;
                } else {
                    /* Append to the end of the LUN chain */
                    while (pDev->next_lun) {
                        pDev = pDev->next_lun;
                    }
                    pDev = pDev->next_lun =
                        kzalloc(sizeof(struct adpt_device),
                            GFP_ATOMIC);
                    if (pDev == NULL) {
                        return -ENOMEM;
                    }
                }
                pDev->tid = d->lct_data.tid;
                pDev->scsi_channel = bus_no;
                pDev->scsi_id = scsi_id;
                pDev->scsi_lun = scsi_lun;
                pDev->pI2o_dev = d;
                d->owner = pDev;
                pDev->type = (buf[0]) & 0xff;
                pDev->flags = (buf[0] >> 8) & 0xff;

                if (scsi_id > pHba->top_scsi_id) {
                    pHba->top_scsi_id = scsi_id;
                }
                if (scsi_lun > pHba->top_scsi_lun) {
                    pHba->top_scsi_lun = scsi_lun;
                }
                continue;
            }

            /* We found an existing device - check it over */
            while (pDev) {
                if (pDev->scsi_lun == scsi_lun) {
                    if (!scsi_device_online(pDev->pScsi_dev)) {
                        printk(KERN_WARNING "%s: Setting device (%d,%d,%d) back online\n",
                            pHba->name, bus_no, scsi_id, scsi_lun);
                        if (pDev->pScsi_dev) {
                            scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
                        }
                    }
                    d = pDev->pI2o_dev;
                    if (d->lct_data.tid != tid) { /* something changed */
                        pDev->tid = tid;
                        memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
                        if (pDev->pScsi_dev) {
                            pDev->pScsi_dev->changed = TRUE;
                            pDev->pScsi_dev->removable = TRUE;
                        }
                    }
                    /* Found it - mark it scanned */
                    pDev->state = DPTI_DEV_ONLINE;
                    break;
                }
                pDev = pDev->next_lun;
            }
        }
    }
    /* Anything still marked unscanned has disappeared: take it offline */
    for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
        pDev = (struct adpt_device*) pI2o_dev->owner;
        if (!pDev) {
            continue;
        }
        if (pDev->state & DPTI_DEV_UNSCANNED) {
            pDev->state = DPTI_DEV_OFFLINE;
            printk(KERN_WARNING "%s: Device (%d,%d,%d) offline\n", pHba->name, pDev->scsi_channel, pDev->scsi_id, pDev->scsi_lun);
            if (pDev->pScsi_dev) {
                scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
            }
        }
    }
    return 0;
}
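
/*
 * Fail every command still posted to the controller with QUEUE_FULL so
 * the mid-layer requeues it; used after a reset, when the outstanding
 * message frames are gone.
 */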
static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
    struct scsi_cmnd* cmd = NULL;
    struct scsi_device* d = NULL;

    shost_for_each_device(d, pHba->host) {
        unsigned long flags;
        spin_lock_irqsave(&d->list_lock, flags);
        list_for_each_entry(cmd, &d->cmd_list, list) {
            if (cmd->serial_number == 0) {
                continue;
            }
            cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
            cmd->scsi_done(cmd);
        }
        spin_unlock_irqrestore(&d->list_lock, flags);
    }
}

/*============================================================================
 *  Routines from i2o subsystem
 *============================================================================
 */
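
/*
 * Bring the IOP to a state where it can take commands: reset it if it
 * is faulted, hung, or was previously initialized, then set up the
 * outbound queue and read the hardware resource table.
 */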
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
    int rcode;

    if (pHba->initialized) {
        if (adpt_i2o_status_get(pHba) < 0) {
            if ((rcode = adpt_i2o_reset_hba(pHba)) != 0) {
                printk(KERN_WARNING "%s: Could NOT reset.\n", pHba->name);
                return rcode;
            }
            if (adpt_i2o_status_get(pHba) < 0) {
                printk(KERN_INFO "HBA not responding.\n");
                return -1;
            }
        }

        if (pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
            printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
            return -1;
        }

        if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
            pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
            pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
            pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
            adpt_i2o_reset_hba(pHba);
            if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
                printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
                return -1;
            }
        }
    } else {
        if ((rcode = adpt_i2o_reset_hba(pHba)) != 0) {
            printk(KERN_WARNING "%s: Could NOT reset.\n", pHba->name);
            return rcode;
        }
    }

    if (adpt_i2o_init_outbound_q(pHba) < 0) {
        return -1;
    }

    /* In HOLD state */
    if (adpt_i2o_hrt_get(pHba) < 0) {
        return -1;
    }

    return 0;
}
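
/*
 * Bring a controller online, into the OPERATIONAL state.
 */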
static int adpt_i2o_online_hba(adpt_hba* pHba)
{
    if (adpt_i2o_systab_send(pHba) < 0) {
        adpt_i2o_delete_hba(pHba);
        return -1;
    }
    /* In READY state */

    if (adpt_i2o_enable_hba(pHba) < 0) {
        adpt_i2o_delete_hba(pHba);
        return -1;
    }
    /* In OPERATIONAL state */

    return 0;
}
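
/*
 * Return a claimed message frame to the adapter by filling it with a
 * NOP and posting it back.
 */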
static s32 adpt_send_nop(adpt_hba* pHba, u32 m)
{
    u32 __iomem *msg;
    ulong timeout = jiffies + 5*HZ;

    while (m == EMPTY_QUEUE) {
        rmb();
        m = readl(pHba->post_port);
        if (m != EMPTY_QUEUE) {
            break;
        }
        if (time_after(jiffies, timeout)) {
            printk(KERN_ERR "%s: Timeout waiting for message frame!\n", pHba->name);
            return 2;
        }
        schedule_timeout_uninterruptible(1);
    }
    msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
    writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg[0]);
    writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0, &msg[1]);
    writel(0, &msg[2]);
    wmb();

    writel(m, pHba->post_port);
    wmb();
    return 0;
}
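
/*
 * Initialize the adapter's outbound (reply) queue: issue
 * I2O_CMD_OUTBOUND_INIT, wait for it to complete, then allocate the
 * reply pool and hand every reply-frame address to the adapter.
 */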
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
    u8 *status;
    dma_addr_t addr;
    u32 __iomem *msg = NULL;
    int i;
    ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
    u32 m;

    do {
        rmb();
        m = readl(pHba->post_port);
        if (m != EMPTY_QUEUE) {
            break;
        }

        if (time_after(jiffies, timeout)) {
            printk(KERN_WARNING "%s: Timeout waiting for message frame\n", pHba->name);
            return -ETIMEDOUT;
        }
        schedule_timeout_uninterruptible(1);
    } while (m == EMPTY_QUEUE);

    msg = (u32 __iomem *)(pHba->msg_addr_virt + m);

    status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
    if (!status) {
        adpt_send_nop(pHba, m);
        printk(KERN_WARNING "%s: IOP reset failed - no free memory.\n",
            pHba->name);
        return -ENOMEM;
    }
    memset(status, 0, 4);

    writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg[0]);
    writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, &msg[1]);
    writel(0, &msg[2]);
    writel(0x0106, &msg[3]);    /* Transaction context */
    writel(4096, &msg[4]);      /* Host page frame size */
    writel((REPLY_FRAME_SIZE) << 16 | 0x80, &msg[5]);   /* Outbound msg frame size in words and Initcode */
    writel(0xD0000004, &msg[6]);    /* Simple SG LE, EOB */
    writel((u32)addr, &msg[7]);

    writel(m, pHba->post_port);
    wmb();

    /* Wait for the adapter to update the status byte */
    do {
        if (*status) {
            if (*status != 0x01 /* init in progress */) {
                break;
            }
        }
        rmb();
        if (time_after(jiffies, timeout)) {
            printk(KERN_WARNING "%s: Timeout Initializing\n", pHba->name);
            /* We lose 4 bytes of "status" here, but we
             * cannot free it because the controller may
             * awake and corrupt those bytes at any time.
             */
            /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
            return -ETIMEDOUT;
        }
        schedule_timeout_uninterruptible(1);
    } while (1);

    /* If the queue initialization did not complete, give up */
    if (*status != 0x04 /* init complete */) {
        dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
        return -2;
    }
    dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

    if (pHba->reply_pool != NULL) {
        dma_free_coherent(&pHba->pDev->dev,
            pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
            pHba->reply_pool, pHba->reply_pool_pa);
    }

    pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
            pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
            &pHba->reply_pool_pa, GFP_KERNEL);
    if (!pHba->reply_pool) {
        printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
        return -ENOMEM;
    }
    memset(pHba->reply_pool, 0, pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

    /* Post every reply frame address to the adapter's reply FIFO */
    for (i = 0; i < pHba->reply_fifo_size; i++) {
        writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
            pHba->reply_port);
        wmb();
    }
    adpt_i2o_status_get(pHba);
    return 0;
}
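
/*
 * Fetch the IOP status block and derive the inbound/outbound FIFO
 * depths and the scatter/gather table size from it.
 */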
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
    ulong timeout;
    u32 m;
    u32 __iomem *msg;
    u8 *status_block = NULL;

    if (pHba->status_block == NULL) {
        pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
                    sizeof(i2o_status_block),
                    &pHba->status_block_pa, GFP_KERNEL);
        if (pHba->status_block == NULL) {
            printk(KERN_ERR
                "dpti%d: Get Status Block failed; Out of memory.\n",
                pHba->unit);
            return -ENOMEM;
        }
    }
    memset(pHba->status_block, 0, sizeof(i2o_status_block));
    status_block = (u8*)(pHba->status_block);
    timeout = jiffies + TMOUT_GETSTATUS*HZ;
    do {
        rmb();
        m = readl(pHba->post_port);
        if (m != EMPTY_QUEUE) {
            break;
        }
        if (time_after(jiffies, timeout)) {
            printk(KERN_ERR "%s: Timeout waiting for message !\n",
                pHba->name);
            return -ETIMEDOUT;
        }
        schedule_timeout_uninterruptible(1);
    } while (m == EMPTY_QUEUE);

    msg = (u32 __iomem *)(pHba->msg_addr_virt + m);

    writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg[0]);
    writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID, &msg[1]);
    writel(1, &msg[2]);
    writel(0, &msg[3]);
    writel(0, &msg[4]);
    writel(0, &msg[5]);
    writel(dma_low(pHba->status_block_pa), &msg[6]);
    writel(dma_high(pHba->status_block_pa), &msg[7]);
    writel(sizeof(i2o_status_block), &msg[8]); /* 88 bytes */

    /* Post the message, then wait for the adapter to fill the block;
     * the last byte is set non-zero when the copy is complete.
     */
    writel(m, pHba->post_port);
    wmb();

    while (status_block[87] != 0xff) {
        if (time_after(jiffies, timeout)) {
            printk(KERN_ERR "dpti%d: Get status timeout.\n",
                pHba->unit);
            return -ETIMEDOUT;
        }
        rmb();
        schedule_timeout_uninterruptible(1);
    }

    /* Set up our number of outbound and inbound messages */
    pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
    if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
        pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
    }

    pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
    if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
        pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
    }

    /* Calculate the scatter/gather list size */
    if (dpt_dma64(pHba)) {
        pHba->sg_tablesize
            = ((pHba->status_block->inbound_frame_size * 4
                - 14 * sizeof(u32))
            / (sizeof(struct sg_simple_element) + sizeof(u32)));
    } else {
        pHba->sg_tablesize
            = ((pHba->status_block->inbound_frame_size * 4
                - 12 * sizeof(u32))
            / sizeof(struct sg_simple_element));
    }
    if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
        pHba->sg_tablesize = SG_LIST_ELEMENTS;
    }

#ifdef DEBUG
    printk("dpti%d: State = ", pHba->unit);
    switch (pHba->status_block->iop_state) {
    case 0x01:
        printk("INIT\n");
        break;
    case 0x02:
        printk("RESET\n");
        break;
    case 0x04:
        printk("HOLD\n");
        break;
    case 0x05:
        printk("READY\n");
        break;
    case 0x08:
        printk("OPERATIONAL\n");
        break;
    case 0x10:
        printk("FAILED\n");
        break;
    case 0x11:
        printk("FAULTED\n");
        break;
    default:
        printk("%x (unknown!!)\n", pHba->status_block->iop_state);
    }
#endif
    return 0;
}
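
/*
 * Read the IOP's logical configuration table, growing the buffer until
 * the whole table fits, then map the firmware debug buffer that the
 * adapter advertises.
 */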
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
    u32 msg[8];
    int ret;
    u32 buf[16];

    if ((pHba->lct_size == 0) || (pHba->lct == NULL)) {
        pHba->lct_size = pHba->status_block->expected_lct_size;
    }
    do {
        if (pHba->lct == NULL) {
            pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
                    pHba->lct_size, &pHba->lct_pa,
                    GFP_ATOMIC);
            if (pHba->lct == NULL) {
                printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
                    pHba->name);
                return -ENOMEM;
            }
        }
        memset(pHba->lct, 0, pHba->lct_size);

        msg[0] = EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6;
        msg[1] = I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID;
        msg[2] = 0;
        msg[3] = 0;
        msg[4] = 0xFFFFFFFF;    /* All devices */
        msg[5] = 0x00000000;    /* Report now */
        msg[6] = 0xD0000000 | pHba->lct_size;
        msg[7] = (u32)pHba->lct_pa;

        if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
            printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
                pHba->name, ret);
            printk(KERN_ERR "Adaptec: Error Reading Hardware.\n");
            return ret;
        }

        /* If the table didn't fit, grow the buffer and try again.
         * Free with the size we actually allocated, not the new size.
         */
        if ((pHba->lct->table_size << 2) > pHba->lct_size) {
            int new_size = pHba->lct->table_size << 2;
            dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
                    pHba->lct, pHba->lct_pa);
            pHba->lct_size = new_size;
            pHba->lct = NULL;
        }
    } while (pHba->lct == NULL);

    PDEBUG("%s: Hardware resource table read.\n", pHba->name);

    /* Query the firmware debug buffer (scalar group 0x8000) and map it */
    if (adpt_i2o_query_scalar(pHba, 0, 0x8000, -1, buf, sizeof(buf)) >= 0) {
        pHba->FwDebugBufferSize = buf[1];
        pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
                        pHba->FwDebugBufferSize);
        if (pHba->FwDebugBuffer_P) {
            pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
                        FW_DEBUG_FLAGS_OFFSET;
            pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
                        FW_DEBUG_BLED_OFFSET;
            pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
            pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
                        FW_DEBUG_STR_LENGTH_OFFSET;
            pHba->FwDebugBuffer_P += buf[2];
            pHba->FwDebugFlags = 0;
        }
    }

    return 0;
}
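
/*
 * Build the I2O system table describing every registered IOP so the
 * controllers can be told about each other.
 */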
static int adpt_i2o_build_sys_table(void)
{
    adpt_hba* pHba = hba_chain;
    int count = 0;

    if (sys_tbl)
        dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
                sys_tbl, sys_tbl_pa);

    sys_tbl_len = sizeof(struct i2o_sys_tbl) + /* Header + IOPs */
            (hba_count) * sizeof(struct i2o_sys_tbl_entry);

    sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
                sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
    if (!sys_tbl) {
        printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
        return -ENOMEM;
    }
    memset(sys_tbl, 0, sys_tbl_len);

    sys_tbl->num_entries = hba_count;
    sys_tbl->version = I2OVERSION;
    sys_tbl->change_ind = sys_tbl_ind++;

    for (pHba = hba_chain; pHba; pHba = pHba->next) {
        u64 addr;
        /* Get updated IOP state so we have the latest information */
        if (adpt_i2o_status_get(pHba)) {
            sys_tbl->num_entries--;
            continue; /* try next one */
        }

        sys_tbl->iops[count].org_id = pHba->status_block->org_id;
        sys_tbl->iops[count].iop_id = pHba->unit + 2;
        sys_tbl->iops[count].seg_num = 0;
        sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
        sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
        sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
        sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
        sys_tbl->iops[count].last_changed = sys_tbl_ind - 1;
        sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
        addr = pHba->base_addr_phys + 0x40;
        sys_tbl->iops[count].inbound_low = dma_low(addr);
        sys_tbl->iops[count].inbound_high = dma_high(addr);

        count++;
    }

#ifdef DEBUG
{
    u32 *table = (u32*)sys_tbl;
    printk(KERN_DEBUG "sys_tbl_len=%d in 32bit words\n", (sys_tbl_len >> 2));
    for (count = 0; count < (sys_tbl_len >> 2); count++) {
        printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
            count, table[count]);
    }
}
#endif

    return 0;
}
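
/*
 * Dump the information block associated with a given unit (TID).
 */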
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
    char buf[64];
    int unit = d->lct_data.tid;

    printk(KERN_INFO "TID %3.3d ", unit);

    if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16) >= 0) {
        buf[16] = 0;
        printk(" Vendor: %-12.12s", buf);
    }
    if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16) >= 0) {
        buf[16] = 0;
        printk(" Device: %-12.12s", buf);
    }
    if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8) >= 0) {
        buf[8] = 0;
        printk(" Rev: %-12.12s\n", buf);
    }
#ifdef DEBUG
    printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
    printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
    printk(KERN_INFO "\tFlags: ");

    if (d->lct_data.device_flags & (1 << 0))
        printk("C");    /* ConfigDialog requested */
    if (d->lct_data.device_flags & (1 << 1))
        printk("U");    /* Multi-user capable */
    if (!(d->lct_data.device_flags & (1 << 4)))
        printk("P");    /* Peer service enabled */
    if (!(d->lct_data.device_flags & (1 << 5)))
        printk("M");    /* Mgmt service enabled */
    printk("\n");
#endif
}

#ifdef DEBUG
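
/*
 * Map an I2O class id to a printable name (DEBUG builds only).
 */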
static const char *adpt_i2o_get_class_name(int class)
{
    int idx = 16;
    static char *i2o_class_name[] = {
        "Executive",
        "Device Driver Module",
        "Block Device",
        "Tape Device",
        "LAN Interface",
        "WAN Interface",
        "Fibre Channel Port",
        "Fibre Channel Device",
        "SCSI Device",
        "ATE Port",
        "ATE Device",
        "Floppy Controller",
        "Floppy Device",
        "Secondary Bus Port",
        "Peer Transport Agent",
        "Peer Transport",
        "Unknown"
    };

    switch (class & 0xFFF) {
    case I2O_CLASS_EXECUTIVE:
        idx = 0; break;
    case I2O_CLASS_DDM:
        idx = 1; break;
    case I2O_CLASS_RANDOM_BLOCK_STORAGE:
        idx = 2; break;
    case I2O_CLASS_SEQUENTIAL_STORAGE:
        idx = 3; break;
    case I2O_CLASS_LAN:
        idx = 4; break;
    case I2O_CLASS_WAN:
        idx = 5; break;
    case I2O_CLASS_FIBRE_CHANNEL_PORT:
        idx = 6; break;
    case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
        idx = 7; break;
    case I2O_CLASS_SCSI_PERIPHERAL:
        idx = 8; break;
    case I2O_CLASS_ATE_PORT:
        idx = 9; break;
    case I2O_CLASS_ATE_PERIPHERAL:
        idx = 10; break;
    case I2O_CLASS_FLOPPY_CONTROLLER:
        idx = 11; break;
    case I2O_CLASS_FLOPPY_DEVICE:
        idx = 12; break;
    case I2O_CLASS_BUS_ADAPTER_PORT:
        idx = 13; break;
    case I2O_CLASS_PEER_TRANSPORT_AGENT:
        idx = 14; break;
    case I2O_CLASS_PEER_TRANSPORT:
        idx = 15; break;
    }
    return i2o_class_name[idx];
}
#endif
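
/*
 * Read the hardware resource table, growing the buffer until the whole
 * table fits.
 */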
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
    u32 msg[6];
    int ret, size = sizeof(i2o_hrt);

    do {
        if (pHba->hrt == NULL) {
            pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
                    size, &pHba->hrt_pa, GFP_KERNEL);
            if (pHba->hrt == NULL) {
                printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
                return -ENOMEM;
            }
        }

        msg[0] = SIX_WORD_MSG_SIZE | SGL_OFFSET_4;
        msg[1] = I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID;
        msg[2] = 0;
        msg[3] = 0;
        msg[4] = (0xD0000000 | size);   /* Simple transaction */
        msg[5] = (u32)pHba->hrt_pa;     /* Dump it here */

        if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
            printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
            return ret;
        }

        /* If the table didn't fit, reallocate at the reported size */
        if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
            int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
            dma_free_coherent(&pHba->pDev->dev, size,
                    pHba->hrt, pHba->hrt_pa);
            size = newsize;
            pHba->hrt = NULL;
        }
    } while (pHba->hrt == NULL);
    return 0;
}
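
/*
 * Query one field of a scalar parameter group (or the whole group with
 * field == -1) into buf.  Returns buflen on success or a negative
 * errno on failure.
 */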
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
            int group, int field, void *buf, int buflen)
{
    u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
    u8 *opblk_va;
    dma_addr_t opblk_pa;
    u8 *resblk_va;
    dma_addr_t resblk_pa;

    int size;

    /* 8 bytes for header */
    resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
            sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
    if (resblk_va == NULL) {
        printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
        return -ENOMEM;
    }

    opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
            sizeof(opblk), &opblk_pa, GFP_KERNEL);
    if (opblk_va == NULL) {
        dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
            resblk_va, resblk_pa);
        printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
            pHba->name);
        return -ENOMEM;
    }
    if (field == -1)    /* whole group */
        opblk[4] = -1;

    memcpy(opblk_va, opblk, sizeof(opblk));
    size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
        opblk_va, opblk_pa, sizeof(opblk),
        resblk_va, resblk_pa, sizeof(u8) * (8 + buflen));
    dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
    if (size == -ETIME) {
        dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
            resblk_va, resblk_pa);
        printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
        return -ETIME;
    } else if (size == -EINTR) {
        dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
            resblk_va, resblk_pa);
        printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
        return -EINTR;
    }

    memcpy(buf, resblk_va + 8, buflen);

    dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
        resblk_va, resblk_pa);
    if (size < 0)
        return size;

    return buflen;
}
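
/*
 * Issue a UTIL_PARAMS_GET or UTIL_PARAMS_SET.  The result block must
 * be at least 8 bytes: it begins with ResultCount, ErrorInfoSize,
 * BlockStatus and BlockSize, which are checked before the payload is
 * trusted.
 */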
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
        void *opblk_va, dma_addr_t opblk_pa, int oplen,
        void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
    u32 msg[9];
    u32 *res = (u32 *)resblk_va;
    int wait_status;

    msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
    msg[1] = cmd << 24 | HOST_TID << 12 | tid;
    msg[2] = 0;
    msg[3] = 0;
    msg[4] = 0;
    msg[5] = 0x54000000 | oplen;    /* OperationBlock */
    msg[6] = (u32)opblk_pa;
    msg[7] = 0xD0000000 | reslen;   /* ResultBlock */
    msg[8] = (u32)resblk_pa;

    if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
        printk(KERN_WARNING "adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
        return wait_status;     /* -DetailedStatus */
    }

    if (res[1] & 0x00FF0000) {  /* BlockStatus != SUCCESS */
        printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
            "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
            pHba->name,
            (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
                            : "PARAMS_GET",
            res[1] >> 24, (res[1] >> 16) & 0xFF, res[1] & 0xFFFF);
        return -((res[1] >> 16) & 0xFF);    /* -BlockStatus */
    }

    return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
}
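
/*
 * Quiesce the IOP: stop external operations so the controller can be
 * reconfigured or shut down safely.
 */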
static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
    u32 msg[4];
    int ret;

    adpt_i2o_status_get(pHba);

    /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
    if ((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
        (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)) {
        return 0;
    }

    msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
    msg[1] = I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID;
    msg[2] = 0;
    msg[3] = 0;

    if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
        printk(KERN_INFO "dpti%d: Unable to quiesce (status=%#x).\n",
            pHba->unit, -ret);
    } else {
        printk(KERN_INFO "dpti%d: Quiesced.\n", pHba->unit);
    }

    adpt_i2o_status_get(pHba);
    return ret;
}
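
/*
 * Enable the IOP: move it from READY to OPERATIONAL so it will accept
 * external requests again.
 */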
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
    u32 msg[4];
    int ret;

    adpt_i2o_status_get(pHba);
    if (!pHba->status_block) {
        return -ENOMEM;
    }

    /* Enable only allowed on READY state */
    if (pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
        return 0;

    if (pHba->status_block->iop_state != ADAPTER_STATE_READY)
        return -EINVAL;

    msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
    msg[1] = I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID;
    msg[2] = 0;
    msg[3] = 0;

    if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
        printk(KERN_WARNING "%s: Could not enable (status=%#10x).\n",
            pHba->name, ret);
    } else {
        PDEBUG("%s: Enabled.\n", pHba->name);
    }

    adpt_i2o_status_get(pHba);
    return ret;
}
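
/*
 * Send the system table to an IOP so it knows about the other IOPs in
 * the system.
 */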
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
    u32 msg[12];
    int ret;

    msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
    msg[1] = I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID;
    msg[2] = 0;
    msg[3] = 0;
    msg[4] = (0 << 16) | ((pHba->unit + 2) << 12);  /* Host 0 IOP ID (unit + 2) */
    msg[5] = 0;                                     /* Segment 0 */

    /*
     * Provide three SGL elements:
     * 1st element: SysTab (checksum = 0)
     * 2nd element: Private memory space declaration
     * 3rd element: Private i/o space declaration
     */
    msg[6] = 0x54000000 | sys_tbl_len;
    msg[7] = (u32)sys_tbl_pa;
    msg[8] = 0x54000000 | 0;
    msg[9] = 0;
    msg[10] = 0xD4000000 | 0;
    msg[11] = 0;

    if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
        printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
            pHba->name, ret);
    }
#ifdef DEBUG
    else {
        PINFO("%s: SysTab set.\n", pHba->name);
    }
#endif

    return ret;
}
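
/*
 * Busy-wait for roughly the given number of milliseconds; used only by
 * the UART debug path.
 */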
#ifdef UARTDELAY

static void adpt_delay(int millisec)
{
    int i;
    for (i = 0; i < millisec; i++) {
        udelay(1000);   /* delay for one millisecond */
    }
}

#endif
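
/*
 * SCSI mid-layer host template for this driver.
 */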
static struct scsi_host_template driver_template = {
    .module                  = THIS_MODULE,
    .name                    = "dpt_i2o",
    .proc_name               = "dpt_i2o",
    .show_info               = adpt_show_info,
    .info                    = adpt_info,
    .queuecommand            = adpt_queue,
    .eh_abort_handler        = adpt_abort,
    .eh_device_reset_handler = adpt_device_reset,
    .eh_bus_reset_handler    = adpt_bus_reset,
    .eh_host_reset_handler   = adpt_reset,
    .bios_param              = adpt_bios_param,
    .slave_configure         = adpt_slave_configure,
    .can_queue               = MAX_TO_IOP_MESSAGES,
    .this_id                 = 7,
    .cmd_per_lun             = 1,
    .use_clustering          = ENABLE_CLUSTERING,
};
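
/*
 * Module entry point: detect all adapters, then register each SCSI
 * host with the mid-layer and scan it for devices.
 */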
static int __init adpt_init(void)
{
    int error;
    adpt_hba *pHba, *next;

    printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

    error = adpt_detect(&driver_template);
    if (error < 0)
        return error;
    if (hba_chain == NULL)
        return -ENODEV;

    for (pHba = hba_chain; pHba; pHba = pHba->next) {
        error = scsi_add_host(pHba->host, &pHba->pDev->dev);
        if (error)
            goto fail;
        scsi_scan_host(pHba->host);
    }
    return 0;
fail:
    for (pHba = hba_chain; pHba; pHba = next) {
        next = pHba->next;
        scsi_remove_host(pHba->host);
    }
    return error;
}
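
/*
 * Module exit: unregister every host from the mid-layer, then release
 * the adapters.
 */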
static void __exit adpt_exit(void)
{
    adpt_hba *pHba, *next;

    for (pHba = hba_chain; pHba; pHba = pHba->next)
        scsi_remove_host(pHba->host);
    for (pHba = hba_chain; pHba; pHba = next) {
        next = pHba->next;
        adpt_release(pHba->host);
    }
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");