#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

#include <linux/ioctl.h>
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

static DEFINE_MUTEX(adpt_mutex);
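
/*
 * Driver signature returned through the DPT_SIGNATURE ioctl (presumably
 * consumed by DPT/Adaptec management tools). The processor family and
 * type fields are selected at compile time for the build architecture;
 * unknown architectures report -1.
 */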
static dpt_sig_S DPTI_sig = {
    {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
    PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
    PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
    PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
    PROC_ALPHA, PROC_ALPHA,
#else
    (-1), (-1),
#endif
    FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
    ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
    DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba *hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
    .unlocked_ioctl = adpt_unlocked_ioctl,
    .open           = adpt_open,
    .release        = adpt_close,
#ifdef CONFIG_COMPAT
    .compat_ioctl   = compat_adpt_ioctl,
#endif
    .llseek         = noop_llseek,
};

struct adpt_i2o_post_wait_data {
    int status;
    u32 id;
    adpt_wait_queue_head_t *wq;
    struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);

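/*
 * Small DMA address helpers: dpt_dma64() reports whether this HBA was
 * configured for 64-bit DMA, and dma_high()/dma_low() split a dma_addr_t
 * into the two 32-bit halves used in I2O scatter-gather elements.
 */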
static inline int dpt_dma64(adpt_hba *pHba)
{
    return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
    return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
    return (u32)addr;
}

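/*
 * Read the controller's "blink LED" code from the firmware debug region.
 * A flag byte of 0xbc marks the value register as valid; a non-zero
 * value presumably identifies the error code the firmware is flashing.
 */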
static u8 adpt_read_blink_led(adpt_hba *host)
{
    if (host->FwDebugBLEDflag_P) {
        if (readb(host->FwDebugBLEDflag_P) == 0xbc)
            return readb(host->FwDebugBLEDvalue_P);
    }
    return 0;
}

static struct pci_device_id dptids[] = {
    { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
    { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
    { 0, }
};
MODULE_DEVICE_TABLE(pci, dptids);

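/*
 * Scan the PCI bus for DPT/Adaptec I2O controllers and walk each one
 * through the I2O bring-up sequence: install -> activate (HOLD state) ->
 * build system table -> online (OPERATIONAL state) -> read and parse the
 * LCT. Returns the number of controllers successfully set up.
 */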
static int adpt_detect(struct scsi_host_template *sht)
{
    struct pci_dev *pDev = NULL;
    adpt_hba *pHba;
    adpt_hba *next;

    PINFO("Detecting Adaptec I2O RAID controllers...\n");

    while ((pDev = pci_get_device(PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
        if (pDev->device == PCI_DPT_DEVICE_ID ||
            pDev->device == PCI_DPT_RAPTOR_DEVICE_ID) {
            if (adpt_install_hba(sht, pDev)) {
                PERROR("Could not Init an I2O RAID device\n");
                PERROR("Will not try to detect others.\n");
                return hba_count - 1;
            }
            pci_dev_get(pDev);
        }
    }

    for (pHba = hba_chain; pHba; pHba = next) {
        next = pHba->next;
        if (adpt_i2o_activate_hba(pHba) < 0)
            adpt_i2o_delete_hba(pHba);
    }

rebuild_sys_tab:
    if (hba_chain == NULL)
        return 0;

    if (adpt_i2o_build_sys_table() < 0) {
        adpt_i2o_sys_shutdown();
        return 0;
    }

    PDEBUG("HBA's in HOLD state\n");

    for (pHba = hba_chain; pHba; pHba = pHba->next) {
        if (adpt_i2o_online_hba(pHba) < 0) {
            adpt_i2o_delete_hba(pHba);
            goto rebuild_sys_tab;
        }
    }

    PDEBUG("HBA's in OPERATIONAL state\n");

    printk("dpti: If you have a lot of devices this could take a few minutes.\n");
    for (pHba = hba_chain; pHba; pHba = next) {
        next = pHba->next;
        printk(KERN_INFO "%s: Reading the hardware resource table.\n", pHba->name);
        if (adpt_i2o_lct_get(pHba) < 0) {
            adpt_i2o_delete_hba(pHba);
            continue;
        }

        if (adpt_i2o_parse_lct(pHba) < 0) {
            adpt_i2o_delete_hba(pHba);
            continue;
        }
        adpt_inquiry(pHba);
    }

    adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
    if (IS_ERR(adpt_sysfs_class)) {
        printk(KERN_WARNING "dpti: unable to create dpt_i2o class\n");
        adpt_sysfs_class = NULL;
    }

    for (pHba = hba_chain; pHba; pHba = next) {
        next = pHba->next;
        if (adpt_scsi_host_alloc(pHba, sht) < 0) {
            adpt_i2o_delete_hba(pHba);
            continue;
        }
        pHba->initialized = TRUE;
        pHba->state &= ~DPTI_STATE_RESET;
        if (adpt_sysfs_class) {
            struct device *dev = device_create(adpt_sysfs_class,
                NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
                "dpti%d", pHba->unit);
            if (IS_ERR(dev)) {
                printk(KERN_WARNING "dpti%d: unable to "
                       "create device in dpt_i2o class\n",
                       pHba->unit);
            }
        }
    }

    if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
        adpt_i2o_sys_shutdown();
        return 0;
    }
    return hba_count;
}

static int adpt_release(struct Scsi_Host *host)
{
    adpt_hba *pHba = (adpt_hba *)host->hostdata[0];

    adpt_i2o_delete_hba(pHba);
    scsi_unregister(host);
    return 0;
}

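/*
 * Issue a standard SCSI INQUIRY to the adapter itself via the I2O
 * SCSI_EXEC path and use the returned vendor/model/firmware strings to
 * fill in pHba->detail for adpt_info() and /proc reporting. On a
 * timeout the DMA buffer is deliberately leaked rather than freed while
 * the firmware may still write to it.
 */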
static void adpt_inquiry(adpt_hba *pHba)
{
    u32 msg[17];
    u32 *mptr;
    u32 *lenptr;
    int direction;
    int scsidir;
    u32 len;
    u32 reqlen;
    u8 *buf;
    dma_addr_t addr;
    u8 scb[16];
    s32 rcode;

    memset(msg, 0, sizeof(msg));
    buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
    if (!buf) {
        printk(KERN_ERR "%s: Could not allocate buffer\n", pHba->name);
        return;
    }
    memset((void *)buf, 0, 36);

    len = 36;
    direction = 0x00000000;
    scsidir = 0x40000000;

    if (dpt_dma64(pHba))
        reqlen = 17;
    else
        reqlen = 14;

    msg[0] = reqlen << 16 | SGL_OFFSET_12;
    msg[1] = (0xff << 24 | HOST_TID << 12 | ADAPTER_TID);
    msg[2] = 0;
    msg[3] = 0;
    msg[4] = I2O_CMD_SCSI_EXEC | DPT_ORGANIZATION_ID << 16;
    msg[5] = ADAPTER_TID | 1 << 16;

    msg[6] = scsidir | 0x20a00000 | 6;

    mptr = msg + 7;

    memset(scb, 0, sizeof(scb));
    scb[0] = INQUIRY;
    scb[1] = 0;
    scb[2] = 0;
    scb[3] = 0;
    scb[4] = 36;
    scb[5] = 0;

    memcpy(mptr, scb, sizeof(scb));
    mptr += 4;
    lenptr = mptr++;

    *lenptr = len;
    if (dpt_dma64(pHba)) {
        *mptr++ = (0x7C << 24) + (2 << 16) + 0x02;
        *mptr++ = 1 << PAGE_SHIFT;
        *mptr++ = 0xD0000000 | direction | len;
        *mptr++ = dma_low(addr);
        *mptr++ = dma_high(addr);
    } else {
        *mptr++ = 0xD0000000 | direction | len;
        *mptr++ = addr;
    }

    rcode = adpt_i2o_post_wait(pHba, msg, reqlen << 2, 120);
    if (rcode != 0) {
        sprintf(pHba->detail, "Adaptec I2O RAID");
        printk(KERN_INFO "%s: Inquiry Error (%d)\n", pHba->name, rcode);
        if (rcode != -ETIME && rcode != -EINTR)
            dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
    } else {
        memset(pHba->detail, 0, sizeof(pHba->detail));
        memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
        memcpy(&(pHba->detail[16]), " Model: ", 8);
        memcpy(&(pHba->detail[24]), (u8 *)&buf[16], 16);
        memcpy(&(pHba->detail[40]), " FW: ", 4);
        memcpy(&(pHba->detail[44]), (u8 *)&buf[32], 4);
        pHba->detail[48] = '\0';
        dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
    }
    adpt_i2o_status_get(pHba);
    return;
}

static int adpt_slave_configure(struct scsi_device *device)
{
    struct Scsi_Host *host = device->host;
    adpt_hba *pHba;

    pHba = (adpt_hba *)host->hostdata[0];

    if (host->can_queue && device->tagged_supported) {
        scsi_change_queue_depth(device,
                                host->can_queue - 1);
    }
    return 0;
}

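/*
 * queuecommand entry point (called with the host lock held via
 * DEF_SCSI_QCMD). Short-circuits REQUEST_SENSE when auto-sense data is
 * already in the buffer, refuses new commands while the HBA is
 * resetting, then hands the command to adpt_scsi_to_i2o() for
 * translation and posting.
 */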
static int adpt_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
    adpt_hba *pHba = NULL;
    struct adpt_device *pDev = NULL;

    cmd->scsi_done = done;

    if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
        cmd->result = (DID_OK << 16);
        cmd->scsi_done(cmd);
        return 0;
    }

    pHba = (adpt_hba *)cmd->device->host->hostdata[0];
    if (!pHba)
        return FAILED;

    rmb();
    if ((pHba->state) & DPTI_STATE_RESET)
        return SCSI_MLQUEUE_HOST_BUSY;

    if ((pDev = (struct adpt_device *)(cmd->device->hostdata)) == NULL) {
        if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel,
                                     (u32)cmd->device->id,
                                     cmd->device->lun)) == NULL) {
            cmd->result = (DID_NO_CONNECT << 16);
            cmd->scsi_done(cmd);
            return 0;
        }
        cmd->device->hostdata = pDev;
    }
    pDev->pScsi_dev = cmd->device;

    if (pDev->state & DPTI_DEV_RESET)
        return FAILED;

    return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
                           sector_t capacity, int geom[])
{
    int heads = -1;
    int sectors = -1;
    int cylinders = -1;

    if (capacity < 0x2000) {
        heads = 18;
        sectors = 2;
    } else if (capacity < 0x20000) {
        heads = 64;
        sectors = 32;
    } else if (capacity < 0x40000) {
        heads = 65;
        sectors = 63;
    } else if (capacity < 0x80000) {
        heads = 128;
        sectors = 63;
    } else {
        heads = 255;
        sectors = 63;
    }
    cylinders = sector_div(capacity, heads * sectors);

    if (sdev->type == 5) {
        heads = 252;
        sectors = 63;
        cylinders = 1111;
    }

    geom[0] = heads;
    geom[1] = sectors;
    geom[2] = cylinders;

    PDEBUG("adpt_bios_param: exit\n");
    return 0;
}

static const char *adpt_info(struct Scsi_Host *host)
{
    adpt_hba *pHba;

    pHba = (adpt_hba *)host->hostdata[0];
    return (char *)(pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
    struct adpt_device *d;
    int id;
    int chan;
    adpt_hba *pHba;
    int unit;

    mutex_lock(&adpt_configuration_lock);
    for (pHba = hba_chain; pHba; pHba = pHba->next) {
        if (pHba->host == host)
            break;
    }
    mutex_unlock(&adpt_configuration_lock);
    if (pHba == NULL)
        return 0;

    host = pHba->host;

    seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
    seq_printf(m, "%s\n", pHba->detail);
    seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
               pHba->host->host_no, pHba->name, host->irq);
    seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
               host->can_queue, (int)pHba->reply_fifo_size, host->sg_tablesize);

    seq_printf(m, "Devices:\n");
    for (chan = 0; chan < MAX_CHANNEL; chan++) {
        for (id = 0; id < MAX_ID; id++) {
            d = pHba->channel[chan].device[id];
            while (d) {
                seq_printf(m, "\t%-24.24s", d->pScsi_dev->vendor);
                seq_printf(m, " Rev: %-8.8s\n", d->pScsi_dev->rev);

                unit = d->pI2o_dev->lct_data.tid;
                seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
                           unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
                           scsi_device_online(d->pScsi_dev) ? "online" : "offline");
                d = d->next_lun;
            }
        }
    }
    return 0;
}

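/*
 * Turn a SCSI command into a 32-bit I2O transaction context and back.
 * The command's serial number serves as the context; the reverse lookup
 * walks every device's command list for a matching serial number,
 * temporarily dropping the host lock to do so.
 */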
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
    return (u32)cmd->serial_number;
}

static struct scsi_cmnd *
adpt_cmd_from_context(adpt_hba *pHba, u32 context)
{
    struct scsi_cmnd *cmd;
    struct scsi_device *d;

    if (context == 0)
        return NULL;

    spin_unlock(pHba->host->host_lock);
    shost_for_each_device(d, pHba->host) {
        unsigned long flags;

        spin_lock_irqsave(&d->list_lock, flags);
        list_for_each_entry(cmd, &d->cmd_list, list) {
            if (((u32)cmd->serial_number == context)) {
                spin_unlock_irqrestore(&d->list_lock, flags);
                scsi_device_put(d);
                spin_lock(pHba->host->host_lock);
                return cmd;
            }
        }
        spin_unlock_irqrestore(&d->list_lock, flags);
    }
    spin_lock(pHba->host->host_lock);

    return NULL;
}

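/*
 * Map an ioctl reply buffer to a 32-bit context. On 32-bit kernels the
 * pointer itself fits; on 64-bit kernels a free slot in
 * pHba->ioctl_reply_context[] is claimed and its index returned, with
 * (u32)-1 signalling that too many ioctls are already outstanding (the
 * reply buffer is freed in that case).
 */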
static u32 adpt_ioctl_to_context(adpt_hba *pHba, void *reply)
{
#if BITS_PER_LONG == 32
    return (u32)(unsigned long)reply;
#else
    ulong flags = 0;
    u32 nr, i;

    spin_lock_irqsave(pHba->host->host_lock, flags);
    nr = ARRAY_SIZE(pHba->ioctl_reply_context);
    for (i = 0; i < nr; i++) {
        if (pHba->ioctl_reply_context[i] == NULL) {
            pHba->ioctl_reply_context[i] = reply;
            break;
        }
    }
    spin_unlock_irqrestore(pHba->host->host_lock, flags);
    if (i >= nr) {
        kfree(reply);
        printk(KERN_WARNING "%s: Too many outstanding "
               "ioctl commands\n", pHba->name);
        return (u32)-1;
    }

    return i;
#endif
}

static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
    return (void *)(unsigned long)context;
#else
    void *p = pHba->ioctl_reply_context[context];
    pHba->ioctl_reply_context[context] = NULL;

    return p;
#endif
}

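/*
 * Error-handler abort: build an I2O_CMD_SCSI_ABORT message carrying the
 * context of the command to be killed and wait for the IOP to
 * acknowledge it.
 */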
static int adpt_abort(struct scsi_cmnd *cmd)
{
    adpt_hba *pHba = NULL;
    struct adpt_device *dptdevice;
    u32 msg[5];
    int rcode;

    if (cmd->serial_number == 0)
        return FAILED;

    pHba = (adpt_hba *)cmd->device->host->hostdata[0];
    printk(KERN_INFO "%s: Trying to Abort\n", pHba->name);
    if ((dptdevice = (void *)(cmd->device->hostdata)) == NULL) {
        printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n", pHba->name);
        return FAILED;
    }

    memset(msg, 0, sizeof(msg));
    msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
    msg[1] = I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | dptdevice->tid;
    msg[2] = 0;
    msg[3] = 0;
    msg[4] = adpt_cmd_to_context(cmd);
    if (pHba->host)
        spin_lock_irq(pHba->host->host_lock);
    rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
    if (pHba->host)
        spin_unlock_irq(pHba->host->host_lock);
    if (rcode != 0) {
        if (rcode == -EOPNOTSUPP) {
            printk(KERN_INFO "%s: Abort cmd not supported\n", pHba->name);
            return FAILED;
        }
        printk(KERN_INFO "%s: Abort failed.\n", pHba->name);
        return FAILED;
    }
    printk(KERN_INFO "%s: Abort complete.\n", pHba->name);
    return SUCCESS;
}

#define I2O_DEVICE_RESET 0x27

static int adpt_device_reset(struct scsi_cmnd *cmd)
{
    adpt_hba *pHba;
    u32 msg[4];
    u32 rcode;
    int old_state;
    struct adpt_device *d = cmd->device->hostdata;

    pHba = (void *)cmd->device->host->hostdata[0];
    printk(KERN_INFO "%s: Trying to reset device\n", pHba->name);
    if (!d) {
        printk(KERN_INFO "%s: Reset Device: Device Not found\n", pHba->name);
        return FAILED;
    }
    memset(msg, 0, sizeof(msg));
    msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
    msg[1] = (I2O_DEVICE_RESET << 24 | HOST_TID << 12 | d->tid);
    msg[2] = 0;
    msg[3] = 0;

    if (pHba->host)
        spin_lock_irq(pHba->host->host_lock);
    old_state = d->state;
    d->state |= DPTI_DEV_RESET;
    rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
    d->state = old_state;
    if (pHba->host)
        spin_unlock_irq(pHba->host->host_lock);
    if (rcode != 0) {
        if (rcode == -EOPNOTSUPP) {
            printk(KERN_INFO "%s: Device reset not supported\n", pHba->name);
            return FAILED;
        }
        printk(KERN_INFO "%s: Device reset failed\n", pHba->name);
        return FAILED;
    } else {
        printk(KERN_INFO "%s: Device reset successful\n", pHba->name);
        return SUCCESS;
    }
}

#define I2O_HBA_BUS_RESET 0x87

static int adpt_bus_reset(struct scsi_cmnd *cmd)
{
    adpt_hba *pHba;
    u32 msg[4];
    u32 rcode;

    pHba = (adpt_hba *)cmd->device->host->hostdata[0];
    memset(msg, 0, sizeof(msg));
    printk(KERN_WARNING "%s: Bus reset: SCSI Bus %d: tid: %d\n",
           pHba->name, cmd->device->channel,
           pHba->channel[cmd->device->channel].tid);
    msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
    msg[1] = (I2O_HBA_BUS_RESET << 24 | HOST_TID << 12 |
              pHba->channel[cmd->device->channel].tid);
    msg[2] = 0;
    msg[3] = 0;
    if (pHba->host)
        spin_lock_irq(pHba->host->host_lock);
    rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
    if (pHba->host)
        spin_unlock_irq(pHba->host->host_lock);
    if (rcode != 0) {
        printk(KERN_WARNING "%s: Bus reset failed.\n", pHba->name);
        return FAILED;
    } else {
        printk(KERN_WARNING "%s: Bus reset success.\n", pHba->name);
        return SUCCESS;
    }
}

static int __adpt_reset(struct scsi_cmnd *cmd)
{
    adpt_hba *pHba;
    int rcode;

    pHba = (adpt_hba *)cmd->device->host->hostdata[0];
    printk(KERN_WARNING "%s: Hba Reset: scsi id %d: tid: %d\n",
           pHba->name, cmd->device->channel,
           pHba->channel[cmd->device->channel].tid);
    rcode = adpt_hba_reset(pHba);
    if (rcode == 0) {
        printk(KERN_WARNING "%s: HBA reset complete\n", pHba->name);
        return SUCCESS;
    } else {
        printk(KERN_WARNING "%s: HBA reset failed (%x)\n", pHba->name, rcode);
        return FAILED;
    }
}

static int adpt_reset(struct scsi_cmnd *cmd)
{
    int rc;

    spin_lock_irq(cmd->device->host->host_lock);
    rc = __adpt_reset(cmd);
    spin_unlock_irq(cmd->device->host->host_lock);

    return rc;
}

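/*
 * Full HBA reset: mark the controller as resetting, re-run the
 * activate/build-table/online/LCT sequence, and finally fail any
 * commands that were outstanding when the reset happened so the
 * midlayer retries them.
 */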
static int adpt_hba_reset(adpt_hba *pHba)
{
    int rcode;

    pHba->state |= DPTI_STATE_RESET;

    if ((rcode = adpt_i2o_activate_hba(pHba)) < 0) {
        printk(KERN_ERR "%s: Could not activate\n", pHba->name);
        adpt_i2o_delete_hba(pHba);
        return rcode;
    }

    if ((rcode = adpt_i2o_build_sys_table()) < 0) {
        adpt_i2o_delete_hba(pHba);
        return rcode;
    }
    PDEBUG("%s: in HOLD state\n", pHba->name);

    if ((rcode = adpt_i2o_online_hba(pHba)) < 0) {
        adpt_i2o_delete_hba(pHba);
        return rcode;
    }
    PDEBUG("%s: in OPERATIONAL state\n", pHba->name);

    if ((rcode = adpt_i2o_lct_get(pHba)) < 0) {
        adpt_i2o_delete_hba(pHba);
        return rcode;
    }

    if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0) {
        adpt_i2o_delete_hba(pHba);
        return rcode;
    }
    pHba->state &= ~DPTI_STATE_RESET;

    adpt_fail_posted_scbs(pHba);
    return 0;
}

static void adpt_i2o_sys_shutdown(void)
{
    adpt_hba *pHba, *pNext;
    struct adpt_i2o_post_wait_data *p1, *old;

    printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
    printk(KERN_INFO "  This could take a few minutes if there are many devices attached\n");

    for (pHba = hba_chain; pHba; pHba = pNext) {
        pNext = pHba->next;
        adpt_i2o_delete_hba(pHba);
    }

    for (p1 = adpt_post_wait_queue; p1;) {
        old = p1;
        p1 = p1->next;
        kfree(old);
    }

    adpt_post_wait_queue = NULL;

    printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

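/*
 * One-time PCI setup for a controller: enable the device, claim its BAR
 * regions, choose a 32- or 64-bit DMA mask, ioremap the message unit
 * (the "Raptor" boards use BAR1 for messaging), link a new adpt_hba
 * into the global chain, and hook up the shared interrupt handler.
 */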
static int adpt_install_hba(struct scsi_host_template *sht, struct pci_dev *pDev)
{
    adpt_hba *pHba = NULL;
    adpt_hba *p = NULL;
    ulong base_addr0_phys = 0;
    ulong base_addr1_phys = 0;
    u32 hba_map0_area_size = 0;
    u32 hba_map1_area_size = 0;
    void __iomem *base_addr_virt = NULL;
    void __iomem *msg_addr_virt = NULL;
    int dma64 = 0;
    int raptorFlag = FALSE;

    if (pci_enable_device(pDev))
        return -EINVAL;

    if (pci_request_regions(pDev, "dpt_i2o")) {
        PERROR("dpti: adpt_config_hba: pci request region failed\n");
        return -EINVAL;
    }

    pci_set_master(pDev);

    if (sizeof(dma_addr_t) > 4 &&
        pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
        if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
            dma64 = 1;
    }
    if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
        return -EINVAL;

    pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

    base_addr0_phys = pci_resource_start(pDev, 0);
    hba_map0_area_size = pci_resource_len(pDev, 0);

    if (pDev->device == PCI_DPT_DEVICE_ID) {
        if (pDev->subsystem_device >= 0xc032 && pDev->subsystem_device <= 0xc03b) {
            hba_map0_area_size = 0x400000;
        } else {
            if (hba_map0_area_size > 0x100000)
                hba_map0_area_size = 0x100000;
        }
    } else {
        base_addr1_phys = pci_resource_start(pDev, 1);
        hba_map1_area_size = pci_resource_len(pDev, 1);
        raptorFlag = TRUE;
    }

#if BITS_PER_LONG == 64
    if (raptorFlag == TRUE) {
        if (hba_map0_area_size > 128)
            hba_map0_area_size = 128;
        if (hba_map1_area_size > 524288)
            hba_map1_area_size = 524288;
    } else {
        if (hba_map0_area_size > 524288)
            hba_map0_area_size = 524288;
    }
#endif

    base_addr_virt = ioremap(base_addr0_phys, hba_map0_area_size);
    if (!base_addr_virt) {
        pci_release_regions(pDev);
        PERROR("dpti: adpt_config_hba: io remap failed\n");
        return -EINVAL;
    }

    if (raptorFlag == TRUE) {
        msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size);
        if (!msg_addr_virt) {
            PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
            iounmap(base_addr_virt);
            pci_release_regions(pDev);
            return -EINVAL;
        }
    } else {
        msg_addr_virt = base_addr_virt;
    }

    pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
    if (!pHba) {
        if (msg_addr_virt != base_addr_virt)
            iounmap(msg_addr_virt);
        iounmap(base_addr_virt);
        pci_release_regions(pDev);
        return -ENOMEM;
    }

    mutex_lock(&adpt_configuration_lock);

    if (hba_chain != NULL) {
        for (p = hba_chain; p->next; p = p->next)
            ;
        p->next = pHba;
    } else {
        hba_chain = pHba;
    }
    pHba->next = NULL;
    pHba->unit = hba_count;
    sprintf(pHba->name, "dpti%d", hba_count);
    hba_count++;

    mutex_unlock(&adpt_configuration_lock);

    pHba->pDev = pDev;
    pHba->base_addr_phys = base_addr0_phys;

    pHba->base_addr_virt = base_addr_virt;
    pHba->msg_addr_virt = msg_addr_virt;
    pHba->irq_mask = base_addr_virt + 0x30;
    pHba->post_port = base_addr_virt + 0x40;
    pHba->reply_port = base_addr_virt + 0x44;

    pHba->hrt = NULL;
    pHba->lct = NULL;
    pHba->lct_size = 0;
    pHba->status_block = NULL;
    pHba->post_count = 0;
    pHba->state = DPTI_STATE_RESET;
    pHba->pDev = pDev;
    pHba->devices = NULL;
    pHba->dma64 = dma64;

    spin_lock_init(&pHba->state_lock);
    spin_lock_init(&adpt_post_wait_lock);

    if (raptorFlag == 0) {
        printk(KERN_INFO "Adaptec I2O RAID controller"
               " %d at %p size=%x irq=%d%s\n",
               hba_count - 1, base_addr_virt,
               hba_map0_area_size, pDev->irq,
               dma64 ? " (64-bit DMA)" : "");
    } else {
        printk(KERN_INFO "Adaptec I2O RAID controller %d irq=%d%s\n",
               hba_count - 1, pDev->irq,
               dma64 ? " (64-bit DMA)" : "");
        printk(KERN_INFO "  BAR0 %p - size= %x\n", base_addr_virt, hba_map0_area_size);
        printk(KERN_INFO "  BAR1 %p - size= %x\n", msg_addr_virt, hba_map1_area_size);
    }

    if (request_irq(pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
        printk(KERN_ERR "%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
        adpt_i2o_delete_hba(pHba);
        return -EINVAL;
    }

    return 0;
}

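/*
 * Tear down a controller: unlink it from the global chain, release its
 * IRQ, I/O mappings and DMA buffers, free all cached device structures,
 * and drop the char-device registration when the last HBA goes away.
 */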
static void adpt_i2o_delete_hba(adpt_hba *pHba)
{
    adpt_hba *p1;
    adpt_hba *p2;
    struct i2o_device *d;
    struct i2o_device *next;
    int i;
    int j;
    struct adpt_device *pDev;
    struct adpt_device *pNext;

    mutex_lock(&adpt_configuration_lock);

    if (pHba->host)
        free_irq(pHba->host->irq, pHba);

    p2 = NULL;
    for (p1 = hba_chain; p1; p2 = p1, p1 = p1->next) {
        if (p1 == pHba) {
            if (p2)
                p2->next = p1->next;
            else
                hba_chain = p1->next;
            break;
        }
    }

    hba_count--;
    mutex_unlock(&adpt_configuration_lock);

    iounmap(pHba->base_addr_virt);
    pci_release_regions(pHba->pDev);
    if (pHba->msg_addr_virt != pHba->base_addr_virt)
        iounmap(pHba->msg_addr_virt);
    if (pHba->FwDebugBuffer_P)
        iounmap(pHba->FwDebugBuffer_P);
    if (pHba->hrt) {
        dma_free_coherent(&pHba->pDev->dev,
                          pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
                          pHba->hrt, pHba->hrt_pa);
    }
    if (pHba->lct) {
        dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
                          pHba->lct, pHba->lct_pa);
    }
    if (pHba->status_block) {
        dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
                          pHba->status_block, pHba->status_block_pa);
    }
    if (pHba->reply_pool) {
        dma_free_coherent(&pHba->pDev->dev,
                          pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
                          pHba->reply_pool, pHba->reply_pool_pa);
    }

    for (d = pHba->devices; d; d = next) {
        next = d->next;
        kfree(d);
    }
    for (i = 0; i < pHba->top_scsi_channel; i++) {
        for (j = 0; j < MAX_ID; j++) {
            if (pHba->channel[i].device[j] != NULL) {
                for (pDev = pHba->channel[i].device[j]; pDev; pDev = pNext) {
                    pNext = pDev->next_lun;
                    kfree(pDev);
                }
            }
        }
    }
    pci_dev_put(pHba->pDev);
    if (adpt_sysfs_class)
        device_destroy(adpt_sysfs_class,
                       MKDEV(DPTI_I2O_MAJOR, pHba->unit));
    kfree(pHba);

    if (hba_count <= 0) {
        unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
        if (adpt_sysfs_class) {
            class_destroy(adpt_sysfs_class);
            adpt_sysfs_class = NULL;
        }
    }
}

static struct adpt_device *adpt_find_device(adpt_hba *pHba, u32 chan, u32 id, u64 lun)
{
    struct adpt_device *d;

    if (chan >= MAX_CHANNEL)
        return NULL;

    if (pHba->channel[chan].device == NULL) {
        printk(KERN_DEBUG "Adaptec I2O RAID: Trying to find device before they are allocated\n");
        return NULL;
    }

    d = pHba->channel[chan].device[id];
    if (!d || d->tid == 0)
        return NULL;

    if (d->scsi_lun == lun)
        return d;

    for (d = d->next_lun; d; d = d->next_lun) {
        if (d->scsi_lun == lun)
            return d;
    }
    return NULL;
}

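/*
 * Post a message to the IOP and sleep until the reply interrupt calls
 * adpt_i2o_post_wait_complete() with a matching 15-bit wait id, or the
 * timeout expires. The host lock is dropped around the sleep so the
 * interrupt handler can run; a timeout of 0 waits forever.
 */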
static int adpt_i2o_post_wait(adpt_hba *pHba, u32 *msg, int len, int timeout)
{
    ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
    int status = 0;
    ulong flags = 0;
    struct adpt_i2o_post_wait_data *p1, *p2;
    struct adpt_i2o_post_wait_data *wait_data =
        kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
    DECLARE_WAITQUEUE(wait, current);

    if (!wait_data)
        return -ENOMEM;

    spin_lock_irqsave(&adpt_post_wait_lock, flags);

    wait_data->next = adpt_post_wait_queue;
    adpt_post_wait_queue = wait_data;
    adpt_post_wait_id++;
    adpt_post_wait_id &= 0x7fff;
    wait_data->id = adpt_post_wait_id;
    spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

    wait_data->wq = &adpt_wq_i2o_post;
    wait_data->status = -ETIMEDOUT;

    add_wait_queue(&adpt_wq_i2o_post, &wait);

    msg[2] |= 0x80000000 | ((u32)wait_data->id);
    timeout *= HZ;
    if ((status = adpt_i2o_post_this(pHba, msg, len)) == 0) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (pHba->host)
            spin_unlock_irq(pHba->host->host_lock);
        if (!timeout)
            schedule();
        else {
            timeout = schedule_timeout(timeout);
            if (timeout == 0)
                status = -ETIME;
        }
        if (pHba->host)
            spin_lock_irq(pHba->host->host_lock);
    }
    remove_wait_queue(&adpt_wq_i2o_post, &wait);

    if (status == -ETIMEDOUT) {
        printk(KERN_INFO "dpti%d: POST WAIT TIMEOUT\n", pHba->unit);
        return status;
    }

    p2 = NULL;
    spin_lock_irqsave(&adpt_post_wait_lock, flags);
    for (p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
        if (p1 == wait_data) {
            if (p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION)
                status = -EOPNOTSUPP;
            if (p2)
                p2->next = p1->next;
            else
                adpt_post_wait_queue = p1->next;
            break;
        }
    }
    spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

    kfree(wait_data);

    return status;
}

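/*
 * Grab a free message frame from the IOP's inbound post FIFO (polling
 * for up to 30 seconds), copy the request into it, and write the frame
 * offset back to the post port to hand it to the firmware.
 */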
static s32 adpt_i2o_post_this(adpt_hba *pHba, u32 *data, int len)
{
    u32 m = EMPTY_QUEUE;
    u32 __iomem *msg;
    ulong timeout = jiffies + 30 * HZ;

    do {
        rmb();
        m = readl(pHba->post_port);
        if (m != EMPTY_QUEUE)
            break;
        if (time_after(jiffies, timeout)) {
            printk(KERN_WARNING "dpti%d: Timeout waiting for message frame!\n", pHba->unit);
            return -ETIMEDOUT;
        }
        schedule_timeout_uninterruptible(1);
    } while (m == EMPTY_QUEUE);

    msg = pHba->msg_addr_virt + m;
    memcpy_toio(msg, data, len);
    wmb();

    writel(m, pHba->post_port);
    wmb();

    return 0;
}

static void adpt_i2o_post_wait_complete(u32 context, int status)
{
    struct adpt_i2o_post_wait_data *p1 = NULL;

    context &= 0x7fff;

    spin_lock(&adpt_post_wait_lock);
    for (p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
        if (p1->id == context) {
            p1->status = status;
            spin_unlock(&adpt_post_wait_lock);
            wake_up_interruptible(p1->wq);
            return;
        }
    }
    spin_unlock(&adpt_post_wait_lock);

    printk(KERN_DEBUG "dpti: Could Not find task %d in wait queue\n", context);
    printk(KERN_DEBUG "      Tasks in wait queue:\n");
    for (p1 = adpt_post_wait_queue; p1; p1 = p1->next)
        printk(KERN_DEBUG "        %d\n", p1->id);
    return;
}

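/*
 * Send I2O_CMD_ADAPTER_RESET and poll the 4-byte status word the IOP
 * writes back via DMA: 0x01 means the reset is in progress (the freed
 * message frame must then be returned with a NOP), while 0x02 means the
 * reset was rejected. On a timeout the status buffer is not freed, since
 * the firmware may still write to it.
 */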
static s32 adpt_i2o_reset_hba(adpt_hba *pHba)
{
    u32 msg[8];
    u8 *status;
    dma_addr_t addr;
    u32 m = EMPTY_QUEUE;
    ulong timeout = jiffies + (TMOUT_IOPRESET * HZ);

    if (pHba->initialized == FALSE) {
        timeout = jiffies + (25 * HZ);
    } else {
        adpt_i2o_quiesce_hba(pHba);
    }

    do {
        rmb();
        m = readl(pHba->post_port);
        if (m != EMPTY_QUEUE)
            break;
        if (time_after(jiffies, timeout)) {
            printk(KERN_WARNING "Timeout waiting for message!\n");
            return -ETIMEDOUT;
        }
        schedule_timeout_uninterruptible(1);
    } while (m == EMPTY_QUEUE);

    status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
    if (status == NULL) {
        adpt_send_nop(pHba, m);
        printk(KERN_ERR "IOP reset failed - no free memory.\n");
        return -ENOMEM;
    }
    memset(status, 0, 4);

    msg[0] = EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0;
    msg[1] = I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID;
    msg[2] = 0;
    msg[3] = 0;
    msg[4] = 0;
    msg[5] = 0;
    msg[6] = dma_low(addr);
    msg[7] = dma_high(addr);

    memcpy_toio(pHba->msg_addr_virt + m, msg, sizeof(msg));
    wmb();
    writel(m, pHba->post_port);
    wmb();

    while (*status == 0) {
        if (time_after(jiffies, timeout)) {
            printk(KERN_WARNING "%s: IOP Reset Timeout\n", pHba->name);
            return -ETIMEDOUT;
        }
        rmb();
        schedule_timeout_uninterruptible(1);
    }

    if (*status == 0x01) {
        PDEBUG("%s: Reset in progress...\n", pHba->name);
        do {
            rmb();
            m = readl(pHba->post_port);
            if (m != EMPTY_QUEUE)
                break;
            if (time_after(jiffies, timeout)) {
                printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n", pHba->name);
                return -ETIMEDOUT;
            }
            schedule_timeout_uninterruptible(1);
        } while (m == EMPTY_QUEUE);

        adpt_send_nop(pHba, m);
    }
    adpt_i2o_status_get(pHba);
    if (*status == 0x02 ||
        pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
        printk(KERN_WARNING "%s: Reset reject, trying to clear\n",
               pHba->name);
    } else {
        PDEBUG("%s: Reset completed.\n", pHba->name);
    }

    dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
    adpt_delay(20000);
#endif
    return 0;
}

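/*
 * Walk the Logical Configuration Table returned by the IOP. Claimed
 * entries (user_tid != 0xfff) are only scanned for bus/ID/LUN limits;
 * unclaimed storage-class entries get i2o_device records, bus adapter
 * ports are mapped onto the driver's channel table, and storage devices
 * are linked into the per-channel, per-ID LUN lists.
 */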
static int adpt_i2o_parse_lct(adpt_hba *pHba)
{
    int i;
    int max;
    int tid;
    struct i2o_device *d;
    i2o_lct *lct = pHba->lct;
    u8 bus_no = 0;
    s16 scsi_id;
    u64 scsi_lun;
    u32 buf[10];
    struct adpt_device *pDev;

    if (lct == NULL) {
        printk(KERN_ERR "%s: LCT is empty???\n", pHba->name);
        return -1;
    }

    max = lct->table_size;
    max -= 3;
    max /= 9;

    for (i = 0; i < max; i++) {
        if (lct->lct_entry[i].user_tid != 0xfff) {
            if (lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
                lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
                lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {
                continue;
            }
            tid = lct->lct_entry[i].tid;

            if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) < 0)
                continue;

            bus_no = buf[0] >> 16;
            scsi_id = buf[1];
            scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
            if (bus_no >= MAX_CHANNEL) {
                printk(KERN_WARNING "%s: Channel number %d out of range\n", pHba->name, bus_no);
                continue;
            }
            if (scsi_id >= MAX_ID) {
                printk(KERN_WARNING "%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
                continue;
            }
            if (bus_no > pHba->top_scsi_channel)
                pHba->top_scsi_channel = bus_no;
            if (scsi_id > pHba->top_scsi_id)
                pHba->top_scsi_id = scsi_id;
            if (scsi_lun > pHba->top_scsi_lun)
                pHba->top_scsi_lun = scsi_lun;
            continue;
        }
        d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
        if (d == NULL) {
            printk(KERN_CRIT "%s: Out of memory for I2O device data.\n", pHba->name);
            return -ENOMEM;
        }

        d->controller = pHba;
        d->next = NULL;

        memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

        d->flags = 0;
        tid = d->lct_data.tid;
        adpt_i2o_report_hba_unit(pHba, d);
        adpt_i2o_install_device(pHba, d);
    }
    bus_no = 0;
    for (d = pHba->devices; d; d = d->next) {
        if (d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
            d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT) {
            tid = d->lct_data.tid;

            if (bus_no > pHba->top_scsi_channel)
                pHba->top_scsi_channel = bus_no;

            pHba->channel[bus_no].type = d->lct_data.class_id;
            pHba->channel[bus_no].tid = tid;
            if (adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28) >= 0) {
                pHba->channel[bus_no].scsi_id = buf[1];
                PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
            }

            bus_no++;
            if (bus_no >= MAX_CHANNEL) {
                printk(KERN_WARNING "%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
                break;
            }
        }
    }

    for (d = pHba->devices; d; d = d->next) {
        if (d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
            d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
            d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {

            tid = d->lct_data.tid;
            scsi_id = -1;

            if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) >= 0) {
                bus_no = buf[0] >> 16;
                scsi_id = buf[1];
                scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
                if (bus_no >= MAX_CHANNEL)
                    continue;
                if (scsi_id >= MAX_ID)
                    continue;
                if (pHba->channel[bus_no].device[scsi_id] == NULL) {
                    pDev = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
                    if (pDev == NULL)
                        return -ENOMEM;
                    pHba->channel[bus_no].device[scsi_id] = pDev;
                } else {
                    for (pDev = pHba->channel[bus_no].device[scsi_id];
                         pDev->next_lun; pDev = pDev->next_lun) {
                    }
                    pDev->next_lun = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
                    if (pDev->next_lun == NULL)
                        return -ENOMEM;
                    pDev = pDev->next_lun;
                }
                pDev->tid = tid;
                pDev->scsi_channel = bus_no;
                pDev->scsi_id = scsi_id;
                pDev->scsi_lun = scsi_lun;
                pDev->pI2o_dev = d;
                d->owner = pDev;
                pDev->type = (buf[0]) & 0xff;
                pDev->flags = (buf[0] >> 8) & 0xff;
                if (scsi_id > pHba->top_scsi_id)
                    pHba->top_scsi_id = scsi_id;
                if (scsi_lun > pHba->top_scsi_lun)
                    pHba->top_scsi_lun = scsi_lun;
            }
            if (scsi_id == -1) {
                printk(KERN_WARNING "Could not find SCSI ID for %s\n",
                       d->lct_data.identity_tag);
            }
        }
    }
    return 0;
}

static int adpt_i2o_install_device(adpt_hba *pHba, struct i2o_device *d)
{
    mutex_lock(&adpt_configuration_lock);
    d->controller = pHba;
    d->owner = NULL;
    d->next = pHba->devices;
    d->prev = NULL;
    if (pHba->devices != NULL)
        pHba->devices->prev = d;
    pHba->devices = d;
    *d->dev_name = 0;

    mutex_unlock(&adpt_configuration_lock);
    return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
    int minor;
    adpt_hba *pHba;

    mutex_lock(&adpt_mutex);

    minor = iminor(inode);
    if (minor >= hba_count) {
        mutex_unlock(&adpt_mutex);
        return -ENXIO;
    }
    mutex_lock(&adpt_configuration_lock);
    for (pHba = hba_chain; pHba; pHba = pHba->next) {
        if (pHba->unit == minor)
            break;
    }
    if (pHba == NULL) {
        mutex_unlock(&adpt_configuration_lock);
        mutex_unlock(&adpt_mutex);
        return -ENXIO;
    }

    pHba->in_use = 1;
    mutex_unlock(&adpt_configuration_lock);
    mutex_unlock(&adpt_mutex);

    return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
    int minor;
    adpt_hba *pHba;

    minor = iminor(inode);
    if (minor >= hba_count)
        return -ENXIO;

    mutex_lock(&adpt_configuration_lock);
    for (pHba = hba_chain; pHba; pHba = pHba->next) {
        if (pHba->unit == minor)
            break;
    }
    mutex_unlock(&adpt_configuration_lock);
    if (pHba == NULL)
        return -ENXIO;

    pHba->in_use = 0;

    return 0;
}

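/*
 * I2OUSRCMD handler: copy a user-space I2O message in, rewrite each
 * simple scatter-gather element to point at a kernel bounce buffer
 * (copying data in for data-out elements), post the message and wait,
 * then copy data-in buffers and the reply frame back to user space.
 */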
static int adpt_i2o_passthru(adpt_hba *pHba, u32 __user *arg)
{
    u32 msg[MAX_MESSAGE_SIZE];
    u32 *reply = NULL;
    u32 size = 0;
    u32 reply_size = 0;
    u32 __user *user_msg = arg;
    u32 __user *user_reply = NULL;
    void *sg_list[pHba->sg_tablesize];
    u32 sg_offset = 0;
    u32 sg_count = 0;
    int sg_index = 0;
    u32 i = 0;
    u32 rcode = 0;
    void *p = NULL;
    dma_addr_t addr;
    ulong flags = 0;

    memset(&msg, 0, MAX_MESSAGE_SIZE * 4);

    if (get_user(size, &user_msg[0]))
        return -EFAULT;
    size = size >> 16;

    user_reply = &user_msg[size];
    if (size > MAX_MESSAGE_SIZE)
        return -EFAULT;
    size *= 4;

    if (copy_from_user(msg, user_msg, size))
        return -EFAULT;

    get_user(reply_size, &user_reply[0]);
    reply_size = reply_size >> 16;
    if (reply_size > REPLY_FRAME_SIZE)
        reply_size = REPLY_FRAME_SIZE;
    reply_size *= 4;
    reply = kzalloc(REPLY_FRAME_SIZE * 4, GFP_KERNEL);
    if (reply == NULL) {
        printk(KERN_WARNING "%s: Could not allocate reply buffer\n", pHba->name);
        return -ENOMEM;
    }
    sg_offset = (msg[0] >> 4) & 0xf;
    msg[2] = 0x40000000;
    msg[3] = adpt_ioctl_to_context(pHba, reply);
    if (msg[3] == (u32)-1)
        return -EBUSY;

    memset(sg_list, 0, sizeof(sg_list[0]) * pHba->sg_tablesize);
    if (sg_offset) {
        struct sg_simple_element *sg = (struct sg_simple_element *)(msg + sg_offset);

        sg_count = (size - sg_offset * 4) / sizeof(struct sg_simple_element);
        if (sg_count > pHba->sg_tablesize) {
            printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n", pHba->name, sg_count);
            kfree(reply);
            return -EINVAL;
        }

        for (i = 0; i < sg_count; i++) {
            int sg_size;

            if (!(sg[i].flag_count & 0x10000000)) {
                printk(KERN_DEBUG "%s:Bad SG element %d - not simple (%x)\n",
                       pHba->name, i, sg[i].flag_count);
                rcode = -EINVAL;
                goto cleanup;
            }
            sg_size = sg[i].flag_count & 0xffffff;

            p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
            if (!p) {
                printk(KERN_DEBUG "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
                       pHba->name, sg_size, i, sg_count);
                rcode = -ENOMEM;
                goto cleanup;
            }
            sg_list[sg_index++] = p;

            if (sg[i].flag_count & 0x04000000) {
                if (copy_from_user(p, (void __user *)(ulong)sg[i].addr_bus, sg_size)) {
                    printk(KERN_DEBUG "%s: Could not copy SG buf %d FROM user\n", pHba->name, i);
                    rcode = -EFAULT;
                    goto cleanup;
                }
            }

            sg[i].addr_bus = addr;
        }
    }

    do {
        if (pHba->host) {
            scsi_block_requests(pHba->host);
            spin_lock_irqsave(pHba->host->host_lock, flags);
        }
        rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
        if (rcode != 0)
            printk("adpt_i2o_passthru: post wait failed %d %p\n",
                   rcode, reply);
        if (pHba->host) {
            spin_unlock_irqrestore(pHba->host->host_lock, flags);
            scsi_unblock_requests(pHba->host);
        }
    } while (rcode == -ETIMEDOUT);

    if (rcode)
        goto cleanup;

    if (sg_offset) {
        u32 j;
        struct sg_simple_element *sg;
        int sg_size;

        memset(&msg, 0, MAX_MESSAGE_SIZE * 4);

        if (get_user(size, &user_msg[0])) {
            rcode = -EFAULT;
            goto cleanup;
        }
        size = size >> 16;
        size *= 4;
        if (size > MAX_MESSAGE_SIZE) {
            rcode = -EINVAL;
            goto cleanup;
        }

        if (copy_from_user(msg, user_msg, size)) {
            rcode = -EFAULT;
            goto cleanup;
        }
        sg_count = (size - sg_offset * 4) / sizeof(struct sg_simple_element);

        sg = (struct sg_simple_element *)(msg + sg_offset);
        for (j = 0; j < sg_count; j++) {
            if (!(sg[j].flag_count & 0x4000000)) {
                sg_size = sg[j].flag_count & 0xffffff;

                if (copy_to_user((void __user *)(ulong)sg[j].addr_bus, sg_list[j], sg_size)) {
                    printk(KERN_WARNING "%s: Could not copy %p TO user %x\n",
                           pHba->name, sg_list[j], sg[j].addr_bus);
                    rcode = -EFAULT;
                    goto cleanup;
                }
            }
        }
    }

    if (reply_size) {
        if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
            printk(KERN_WARNING "%s: Could not copy message context FROM user\n", pHba->name);
            rcode = -EFAULT;
        }
        if (copy_to_user(user_reply, reply, reply_size)) {
            printk(KERN_WARNING "%s: Could not copy reply TO user\n", pHba->name);
            rcode = -EFAULT;
        }
    }

cleanup:
    if (rcode != -ETIME && rcode != -EINTR) {
        struct sg_simple_element *sg =
            (struct sg_simple_element *)(msg + sg_offset);

        kfree(reply);
        while (sg_index) {
            if (sg_list[--sg_index]) {
                dma_free_coherent(&pHba->pDev->dev,
                                  sg[sg_index].flag_count & 0xffffff,
                                  sg_list[sg_index],
                                  sg[sg_index].addr_bus);
            }
        }
    }
    return rcode;
}

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S *si)
{
    si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S *si)
{
    si->processorType = PROC_ULTRASPARC;
}
#endif

#if defined __alpha__
static void adpt_alpha_info(sysInfo_S *si)
{
    si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__
static void adpt_i386_info(sysInfo_S *si)
{
    switch (boot_cpu_data.x86) {
    case CPU_386:
        si->processorType = PROC_386;
        break;
    case CPU_486:
        si->processorType = PROC_486;
        break;
    case CPU_586:
        si->processorType = PROC_PENTIUM;
        break;
    default:
        si->processorType = PROC_PENTIUM;
        break;
    }
}
#endif

static int adpt_system_info(void __user *buffer)
{
    sysInfo_S si;

    memset(&si, 0, sizeof(si));

    si.osType = OS_LINUX;
    si.osMajorVersion = 0;
    si.osMinorVersion = 0;
    si.osRevision = 0;
    si.busType = SI_PCI_BUS;
    si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
    adpt_i386_info(&si);
#elif defined(__ia64__)
    adpt_ia64_info(&si);
#elif defined(__sparc__)
    adpt_sparc_info(&si);
#elif defined(__alpha__)
    adpt_alpha_info(&si);
#else
    si.processorType = 0xff;
#endif
    if (copy_to_user(buffer, &si, sizeof(si))) {
        printk(KERN_WARNING "dpti: Could not copy buffer TO user\n");
        return -EFAULT;
    }

    return 0;
}

static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
    int minor;
    int error = 0;
    adpt_hba *pHba;
    ulong flags = 0;
    void __user *argp = (void __user *)arg;

    minor = iminor(inode);
    if (minor >= DPTI_MAX_HBA)
        return -ENXIO;

    mutex_lock(&adpt_configuration_lock);
    for (pHba = hba_chain; pHba; pHba = pHba->next) {
        if (pHba->unit == minor)
            break;
    }
    mutex_unlock(&adpt_configuration_lock);
    if (pHba == NULL)
        return -ENXIO;

    while ((volatile u32)pHba->state & DPTI_STATE_RESET)
        schedule_timeout_uninterruptible(2);

    switch (cmd) {
    case DPT_SIGNATURE:
        if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig)))
            return -EFAULT;
        break;
    case I2OUSRCMD:
        return adpt_i2o_passthru(pHba, argp);

    case DPT_CTRLINFO: {
        drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA       0x0002
#define FLG_OSD_I2O       0x0004
        memset(&HbaInfo, 0, sizeof(HbaInfo));
        HbaInfo.drvrHBAnum = pHba->unit;
        HbaInfo.baseAddr = (ulong)pHba->base_addr_phys;
        HbaInfo.blinkState = adpt_read_blink_led(pHba);
        HbaInfo.pciBusNum = pHba->pDev->bus->number;
        HbaInfo.pciDeviceNum = PCI_SLOT(pHba->pDev->devfn);
        HbaInfo.Interrupt = pHba->pDev->irq;
        HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
        if (copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))) {
            printk(KERN_WARNING "%s: Could not copy HbaInfo TO user\n", pHba->name);
            return -EFAULT;
        }
        break;
    }
    case DPT_SYSINFO:
        return adpt_system_info(argp);
    case DPT_BLINKLED: {
        u32 value;

        value = (u32)adpt_read_blink_led(pHba);
        if (copy_to_user(argp, &value, sizeof(value)))
            return -EFAULT;
        break;
    }
    case I2ORESETCMD:
        if (pHba->host)
            spin_lock_irqsave(pHba->host->host_lock, flags);
        adpt_hba_reset(pHba);
        if (pHba->host)
            spin_unlock_irqrestore(pHba->host->host_lock, flags);
        break;
    case I2ORESCANCMD:
        adpt_rescan(pHba);
        break;
    default:
        return -EINVAL;
    }

    return error;
}

static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
    struct inode *inode;
    long ret;

    inode = file_inode(file);

    mutex_lock(&adpt_mutex);
    ret = adpt_ioctl(inode, file, cmd, arg);
    mutex_unlock(&adpt_mutex);

    return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
                              unsigned int cmd, unsigned long arg)
{
    struct inode *inode;
    long ret;

    inode = file_inode(file);

    mutex_lock(&adpt_mutex);

    switch (cmd) {
    case DPT_SIGNATURE:
    case I2OUSRCMD:
    case DPT_CTRLINFO:
    case DPT_SYSINFO:
    case DPT_BLINKLED:
    case I2ORESETCMD:
    case I2ORESCANCMD:
    case (DPT_TARGET_BUSY & 0xFFFF):
    case DPT_TARGET_BUSY:
        ret = adpt_ioctl(inode, file, cmd, arg);
        break;
    default:
        ret = -ENOIOCTLCMD;
    }

    mutex_unlock(&adpt_mutex);

    return ret;
}
#endif

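/*
 * Interrupt handler: drain the outbound reply FIFO. Each reply frame is
 * an ioctl reply (context bit 0x40000000), a post-wait completion
 * (context bit 0x80000000), or a normal SCSI command completion, and
 * every frame is written back to the reply port so the IOP can reuse it.
 */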
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
    struct scsi_cmnd *cmd;
    adpt_hba *pHba = dev_id;
    u32 m;
    void __iomem *reply;
    u32 status = 0;
    u32 context;
    ulong flags = 0;
    int handled = 0;

    if (pHba == NULL) {
        printk(KERN_WARNING "adpt_isr: NULL dev_id\n");
        return IRQ_NONE;
    }
    if (pHba->host)
        spin_lock_irqsave(pHba->host->host_lock, flags);

    while (readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
        m = readl(pHba->reply_port);
        if (m == EMPTY_QUEUE) {
            rmb();
            m = readl(pHba->reply_port);
            if (m == EMPTY_QUEUE) {
                printk(KERN_ERR "dpti: Could not get reply frame\n");
                goto out;
            }
        }
        if (pHba->reply_pool_pa <= m &&
            m < pHba->reply_pool_pa +
                (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
            reply = (u8 *)pHba->reply_pool +
                    (m - pHba->reply_pool_pa);
        } else {
            printk(KERN_ERR "dpti: reply frame not from pool\n");
            reply = (u8 *)bus_to_virt(m);
        }

        if (readl(reply) & MSG_FAIL) {
            u32 old_m = readl(reply + 28);
            void __iomem *msg;
            u32 old_context;

            PDEBUG("%s: Failed message\n", pHba->name);
            if (old_m >= 0x100000) {
                printk(KERN_ERR "%s: Bad preserved MFA (%x)- dropping frame\n", pHba->name, old_m);
                writel(m, pHba->reply_port);
                continue;
            }

            msg = pHba->msg_addr_virt + old_m;
            old_context = readl(msg + 12);
            writel(old_context, reply + 12);
            adpt_send_nop(pHba, old_m);
        }
        context = readl(reply + 8);
        if (context & 0x40000000) {
            void *p = adpt_ioctl_from_context(pHba, readl(reply + 12));

            if (p != NULL)
                memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
        }
        if (context & 0x80000000) {
            status = readl(reply + 16);
            if (status >> 24)
                status &= 0xffff;
            else
                status = I2O_POST_WAIT_OK;

            if (!(context & 0x40000000)) {
                cmd = adpt_cmd_from_context(pHba,
                                            readl(reply + 12));
                if (cmd != NULL) {
                    printk(KERN_WARNING "%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n",
                           pHba->name, cmd, context);
                }
            }
            adpt_i2o_post_wait_complete(context, status);
        } else {
            cmd = adpt_cmd_from_context(pHba, readl(reply + 12));
            if (cmd != NULL) {
                scsi_dma_unmap(cmd);
                if (cmd->serial_number != 0)
                    adpt_i2o_to_scsi(reply, cmd);
            }
        }
        writel(m, pHba->reply_port);
        wmb();
        rmb();
    }
    handled = 1;
out:
    if (pHba->host)
        spin_unlock_irqrestore(pHba->host->host_lock, flags);
    return IRQ_RETVAL(handled);
}

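/*
 * Translate a struct scsi_cmnd into an I2O_CMD_SCSI_EXEC request:
 * encode the data-direction flags, copy in the CDB, and build a 32- or
 * 64-bit scatter-gather list from the DMA-mapped segments before
 * posting the message to the IOP.
 */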
static s32 adpt_scsi_to_i2o(adpt_hba *pHba, struct scsi_cmnd *cmd, struct adpt_device *d)
{
    int i;
    u32 msg[MAX_MESSAGE_SIZE];
    u32 *mptr;
    u32 *lptr;
    u32 *lenptr;
    int direction;
    int scsidir;
    int nseg;
    u32 len;
    u32 reqlen;
    s32 rcode;
    dma_addr_t addr;

    memset(msg, 0, sizeof(msg));
    len = scsi_bufflen(cmd);
    direction = 0x00000000;

    scsidir = 0x00000000;
    if (len) {
        switch (cmd->sc_data_direction) {
        case DMA_FROM_DEVICE:
            scsidir = 0x40000000;
            break;
        case DMA_TO_DEVICE:
            direction = 0x04000000;
            scsidir = 0x80000000;
            break;
        case DMA_NONE:
            break;
        case DMA_BIDIRECTIONAL:
            scsidir = 0x40000000;
            break;
        default:
            printk(KERN_WARNING "%s: scsi opcode 0x%x not supported.\n",
                   pHba->name, cmd->cmnd[0]);
            cmd->result = (DID_OK << 16) | (INITIATOR_ERROR << 8);
            cmd->scsi_done(cmd);
            return 0;
        }
    }

    msg[1] = ((0xff << 24) | (HOST_TID << 12) | d->tid);
    msg[2] = 0;
    msg[3] = adpt_cmd_to_context(cmd);

    msg[4] = I2O_CMD_SCSI_EXEC | (DPT_ORGANIZATION_ID << 16);
    msg[5] = d->tid;

    msg[6] = scsidir | 0x20a00000 | cmd->cmd_len;

    mptr = msg + 7;

    memset(mptr, 0, 16);
    memcpy(mptr, cmd->cmnd, cmd->cmd_len);
    mptr += 4;
    lenptr = mptr++;
    if (dpt_dma64(pHba)) {
        reqlen = 16;
        *mptr++ = (0x7C << 24) + (2 << 16) + 0x02;
        *mptr++ = 1 << PAGE_SHIFT;
    } else {
        reqlen = 14;
    }

    nseg = scsi_dma_map(cmd);
    BUG_ON(nseg < 0);
    if (nseg) {
        struct scatterlist *sg;

        len = 0;
        scsi_for_each_sg(cmd, sg, nseg, i) {
            lptr = mptr;
            *mptr++ = direction | 0x10000000 | sg_dma_len(sg);
            len += sg_dma_len(sg);
            addr = sg_dma_address(sg);
            *mptr++ = dma_low(addr);
            if (dpt_dma64(pHba))
                *mptr++ = dma_high(addr);

            if (i == nseg - 1)
                *lptr = direction | 0xD0000000 | sg_dma_len(sg);
        }
        reqlen = mptr - msg;
        *lenptr = len;

        if (cmd->underflow && len != cmd->underflow) {
            printk(KERN_WARNING "Cmd len %08X Cmd underflow %08X\n",
                   len, cmd->underflow);
        }
    } else {
        *lenptr = len = 0;
        reqlen = 12;
    }

    msg[0] = reqlen << 16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

    rcode = adpt_i2o_post_this(pHba, msg, reqlen << 2);
    if (rcode == 0)
        return 0;

    return rcode;
}

static s32 adpt_scsi_host_alloc(adpt_hba *pHba, struct scsi_host_template *sht)
{
    struct Scsi_Host *host;

    host = scsi_host_alloc(sht, sizeof(adpt_hba *));
    if (host == NULL) {
        printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
        return -1;
    }
    host->hostdata[0] = (unsigned long)pHba;
    pHba->host = host;

    host->irq = pHba->pDev->irq;
    host->io_port = 0;
    host->n_io_port = 0;
    host->max_id = 16;
    host->max_lun = 256;
    host->max_channel = pHba->top_scsi_channel + 1;
    host->cmd_per_lun = 1;
    host->unique_id = (u32)sys_tbl_pa + pHba->unit;
    host->sg_tablesize = pHba->sg_tablesize;
    host->can_queue = pHba->post_fifo_size;
    host->use_cmd_list = 1;

    return 0;
}

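/*
 * Translate an I2O reply frame back into SCSI midlayer status: map the
 * detailed status code onto a host byte, record the residual count,
 * copy any auto-sense data, then invoke scsi_done().
 */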
2370static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2371{
2372 adpt_hba* pHba;
2373 u32 hba_status;
2374 u32 dev_status;
2375 u32 reply_flags = readl(reply) & 0xff00;
2376
2377
2378
2379 u16 detailed_status = readl(reply+16) &0xffff;
2380 dev_status = (detailed_status & 0xff);
2381 hba_status = detailed_status >> 8;
2382
2383
2384 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2385
2386 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2387
2388 cmd->sense_buffer[0] = '\0';
2389
2390 if(!(reply_flags & MSG_FAIL)) {
2391 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2392 case I2O_SCSI_DSC_SUCCESS:
2393 cmd->result = (DID_OK << 16);
2394
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
				hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

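		/*
		 * On CHECK CONDITION, copy the autosense data from the
		 * reply frame (at most 40 bytes) into the midlayer's
		 * sense buffer.
		 */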
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);

			memcpy_fromio(cmd->sense_buffer, (reply+28), len);
			if (cmd->sense_buffer[0] == 0x70 /* fixed-format sense data */ &&
			    cmd->sense_buffer[2] == DATA_PROTECT) {
				/* This is to handle a failed array */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);
			}
		}
	} else {
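		/*
		 * The IOP rejected the message outright (MSG_FAIL): we
		 * never reached the target, so report a timeout and let
		 * the midlayer retry.
		 */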
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
	return cmd->result;
}

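/*
 * Re-read the logical configuration table and reconcile it with the
 * devices we already know about, under the host lock.
 */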
static s32 adpt_rescan(adpt_hba* pHba)
{
	s32 rcode;
	ulong flags = 0;

	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);
	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
		goto out;
	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
		goto out;
	rcode = 0;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return rcode;
}

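/*
 * Walk a freshly read LCT: create entries for new devices, put
 * returning devices back online, and mark vanished ones offline.
 */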
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10];
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

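	/* table_size is in 32-bit words: 3 words of header, then 9 words
	   per entry */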
	max = lct->table_size;
	max -= 3;
	max /= 9;

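	/* Mark everything unscanned; whatever is still flagged after the
	   walk below has disappeared from the LCT */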
	for (d = pHba->devices; d; d = d->next) {
		pDev = (struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name, max);

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

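		/* Only SCSI-visible device classes are mapped to a
		   (channel, id, lun) triple */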
		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			pDev = pHba->channel[bus_no].device[scsi_id];

			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
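			/* Not seen before: build i2o_device and
			   adpt_device entries for it */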
			if(!pDev) {
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d==NULL) {
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),
						       GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
						kzalloc(sizeof(struct adpt_device),
							GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;

				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			}

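			/* Already known: bring it back online and refresh
			   its TID if the firmware renumbered it */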
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
							pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) {
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}

					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
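
	/* Take devices that dropped out of the LCT offline */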
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev = (struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",
				pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}

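/*
 * Complete every command still queued on this host with QUEUE_FULL so
 * the midlayer will requeue it.
 */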
static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
	struct scsi_cmnd* cmd = NULL;
	struct scsi_device* d = NULL;

	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if(cmd->serial_number == 0){
				continue;
			}
			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
			cmd->scsi_done(cmd);
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
}

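/*
 * Bring the IOP to a point where it can be initialized: make sure it
 * answers a status get (resetting it if necessary), reset it out of
 * READY/OPERATIONAL/HOLD/FAILED states, then set up the outbound
 * queue and read the hardware resource table.
 */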
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized) {
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 ||
			    pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}
	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

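	/* The IOP should now be in HOLD state; read its hardware
	   resource table */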
	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}

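/*
 * Move the IOP from HOLD to OPERATIONAL: send it the system table,
 * then enable it.
 */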
static int adpt_i2o_online_hba(adpt_hba* pHba)
{
	if (adpt_i2o_systab_send(pHba) < 0) {
		adpt_i2o_delete_hba(pHba);
		return -1;
	}

	if (adpt_i2o_enable_hba(pHba) < 0) {
		adpt_i2o_delete_hba(pHba);
		return -1;
	}

	return 0;
}

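/*
 * Post a UtilNOP in message frame m, returning a frame we claimed but
 * cannot use so the adapter can recycle it.
 */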
static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0, &msg[1]);
	writel(0, &msg[2]);
	wmb();

	writel(m, pHba->post_port);
	wmb();
	return 0;
}

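/*
 * Initialize the adapter's outbound (reply) queue: post an
 * ExecOutboundInit, poll the 4-byte status cell it DMAs back until it
 * completes, then allocate the reply pool and prime the reply FIFO
 * with the bus address of every frame.
 */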
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = (u32 __iomem *)(pHba->msg_addr_virt+m);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: Outbound queue init failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);	/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);	/* Address of the 4-byte status cell */

	writel(m, pHba->post_port);
	wmb();

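	/* Poll the status byte the adapter writes back: 0x01 means the
	   init is still in progress, 0x04 means it completed */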
	do {
		if (*status) {
			if (*status != 0x01 /* I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS */) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/*
			 * We lose 4 bytes here, but we cannot free the
			 * status cell: the adapter may still wake up
			 * and DMA into it.
			 */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	if (*status != 0x04 /* I2O_EXEC_OUTBOUND_INIT_COMPLETE */) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

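	/* Replace any previous reply pool with one sized for the new
	   FIFO depth */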
	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0, pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

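	/* Prime the outbound FIFO with the reply frame addresses */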
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}

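/*
 * Fetch the IOP's status block and derive our FIFO depths and
 * scatter/gather limits from what it reports.
 */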
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block = NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
				"dpti%d: Get Status Block failed; Out of memory.\n",
				pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
				pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);

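	/* Build an ExecStatusGet; the IOP DMAs the status block straight
	   into our buffer */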
	msg = (u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel(dma_low(pHba->status_block_pa), &msg[6]);
	writel(dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]);

	writel(m, pHba->post_port);
	wmb();

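	/* The last byte of the block is set to 0xff once the IOP has
	   written the whole thing */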
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

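	/* Clamp the inbound/outbound frame counts to what we support */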
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

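	/*
	 * Work out how many SG elements fit in an inbound frame: the
	 * 64-bit layout spends 14 message words plus one extra word per
	 * element for the high address bits; the 32-bit layout spends 12.
	 */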
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
				- 14 * sizeof(u32))
			   / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
				- 12 * sizeof(u32))
			   / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}

#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}

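/*
 * Read the IOP's Logical Configuration Table, growing the buffer and
 * retrying whenever the firmware reports a bigger table than we
 * allocated.
 */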
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
					pHba->lct_size, &pHba->lct_pa,
					GFP_ATOMIC);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;
		msg[7] = (u32)pHba->lct_pa;

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
				pHba->name, ret);
			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			int newsize = pHba->lct->table_size << 2;

			/* Free with the size we actually allocated,
			   then grow the buffer for the retry */
			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
					pHba->lct, pHba->lct_pa);
			pHba->lct = NULL;
			pHba->lct_size = newsize;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);

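	/* Query scalar group 0x8000 for the firmware debug buffer and
	   map its flag, BLED and string-length fields */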
	if(adpt_i2o_query_scalar(pHba, 0, 0x8000, -1, buf, sizeof(buf))>=0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
						pHba->FwDebugBufferSize);
		if (pHba->FwDebugBuffer_P) {
			pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_FLAGS_OFFSET;
			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_BLED_OFFSET;
			pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_STR_LENGTH_OFFSET;
			pHba->FwDebugBuffer_P += buf[2];
			pHba->FwDebugFlags = 0;
		}
	}

	return 0;
}

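/*
 * (Re)build the I2O system table, one entry per HBA we drive, so the
 * IOPs can be told about each other.
 */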
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
				sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +
		(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
			sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;

		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue;
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1;
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}

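/*
 * Print the vendor, device and revision strings for one TID, read
 * from its device identity scalar group (0xF100).
 */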
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0) {
		buf[16]=0;
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0) {
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0) {
		buf[8]=0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if(d->lct_data.device_flags&(1<<0))
		printk("C");	/* ConfigDialog requested */
	if(d->lct_data.device_flags&(1<<1))
		printk("U");	/* Multi-user capable */
	if(!(d->lct_data.device_flags&(1<<4)))
		printk("P");	/* Peer service enabled */
	if(!(d->lct_data.device_flags&(1<<5)))
		printk("M");	/* Mgmt service enabled */
	printk("\n");
#endif
}

#ifdef DEBUG

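/*
 * Map an I2O class code to a printable name.
 */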
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch(class&0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif

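/*
 * Read the IOP's Hardware Resource Table, enlarging the buffer and
 * retrying if the first pass reports more entries than fit.
 */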
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0] = SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1] = I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = (0xD0000000 | size);
		msg[5] = (u32)pHba->hrt_pa;

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}

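/*
 * Read one field (or, with field == -1, the whole group) of a scalar
 * parameter group into buf.
 */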
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
			int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;
	int size;

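	/* The 8 extra bytes cover the result-block header */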
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n",
			pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	if (field == -1)	/* whole group */
		opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
		opblk_va, opblk_pa, sizeof(opblk),
		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		return -ETIME;
	} else if (size == -EINTR) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		return -EINTR;
	}

	memcpy(buf, resblk_va+8, buflen);	/* strip the 8-byte header */

	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
		resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}

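/*
 * Issue a UtilParamsGet or UtilParamsSet.  The operation block is
 * passed in opblk and results land in resblk; the minimum result
 * block is 8 bytes, holding ResultCount, ErrorInfoSize, BlockStatus
 * and BlockSize.  On success the return value is the number of bytes
 * used in resblk.
 */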
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		void *opblk_va, dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk(KERN_WARNING "adpt_i2o_issue_params: post_wait failed (%p)\n",
			resblk_va);
		return wait_status;
	}

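	/* A non-zero BlockStatus in the result header means the request
	   failed */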
	if (res[1]&0x00FF0000) {
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF);	/* -BlockStatus */
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2);	/* bytes used in resblk */
}

static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

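	/* SysQuiesce is discarded if the IOP is not in READY or
	   OPERATIONAL state */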
	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
			pHba->unit, -ret);
	} else {
		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}

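/*
 * Enable the IOP: allow it to resume external operations (READY ->
 * OPERATIONAL).
 */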
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if(!pHba->status_block){
		return -ENOMEM;
	}

	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}

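/*
 * Download the system table to this IOP with an ExecSysTabSet.
 */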
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12);	/* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;					/* Segment 0 */

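	/*
	 * Three SGL elements: the system table itself, then empty
	 * private memory and private I/O space declarations.
	 */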
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}

#ifdef UARTDELAY

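/* Busy-wait delay used by the UART debug path */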
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}

#endif

static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.show_info		= adpt_show_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};

static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *next;

	printk(KERN_INFO "Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}

static void __exit adpt_exit(void)
{
	adpt_hba *pHba, *next;

	for (pHba = hba_chain; pHba; pHba = pHba->next)
		scsi_remove_host(pHba->host);
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba->host);
	}
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");