/*
 * dpt_i2o.c - Adaptec I2O RAID driver
 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

#include <linux/ioctl.h>
#include <linux/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

static DEFINE_MUTEX(adpt_mutex);
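
/*
 * Binary driver signature, read through the dptsig interface so the
 * DPT/Adaptec management tools can identify the driver, its version
 * and the processor family it was built for.
 */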
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1), (-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba *hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};
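
/*
 * Bookkeeping for "post and wait" requests: each waiter is linked into
 * adpt_post_wait_queue under adpt_post_wait_lock with a 15-bit id that
 * the firmware echoes back in the reply context, so the interrupt
 * handler can find and wake the sleeping caller.
 */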
struct adpt_i2o_post_wait_data {
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
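
/*
 * Read the controller's "blink LED" register.  The 0xbc magic in the
 * flag byte indicates a valid code; a non-zero value is the fault code
 * the stopped firmware is flashing on the LED.
 */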
static u8 adpt_read_blink_led(adpt_hba *host)
{
	if (host->FwDebugBLEDflag_P) {
		if (readb(host->FwDebugBLEDflag_P) == 0xbc)
			return readb(host->FwDebugBLEDvalue_P);
	}
	return 0;
}

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci, dptids);
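
/*
 * One-time controller discovery: walk the PCI bus for DPT/Adaptec I2O
 * devices, then bring every HBA through the I2O bring-up sequence
 * (activate -> build system table -> online -> read and parse the LCT)
 * and finally register the SCSI hosts and the character-device control
 * node used by the management tools.
 */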
static int adpt_detect(struct scsi_host_template *sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* Search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device(PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if (pDev->device == PCI_DPT_DEVICE_ID ||
		    pDev->device == PCI_DPT_RAPTOR_DEVICE_ID) {
			if (adpt_install_hba(sht, pDev)) {
				PERROR("Could not init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count - 1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		/* Activate does get status, init outbound, and get hrt */
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}

	/* Active IOPs are now in HOLD state */
rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If building the system table fails, kill everything and bail,
	 * as the IOPs cannot be initialized without a system table.
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBAs in HOLD state\n");

	/* If an IOP fails to come online, rebuild the system table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBAs in OPERATIONAL state\n");

	printk(KERN_INFO "dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO "%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING "dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING "dpti%d: unable to create device in dpt_i2o class\n",
				       pHba->unit);
			}
		}
	}

	/*
	 * Register the control device node; without it the management
	 * utilities cannot talk to the driver.
	 */
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}

static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}
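
/*
 * Issue a standard SCSI INQUIRY to the adapter itself (wrapped in an
 * I2O SCSI_EXEC message) and use the response to fill pHba->detail
 * with the vendor, model and firmware strings.
 */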
static void adpt_inquiry(adpt_hba *pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8 *buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "%s: Could not allocate buffer\n", pHba->name);
		return;
	}
	memset(buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	/* DATA IN (iop<--dev) */

	if (dpt_dma64(pHba))
		reqlen = 17;	/* single SGE, 64 bit */
	else
		reqlen = 14;	/* single SGE, 32 bit */

	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	/* Adaptec/DPT private command */
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret */;
	/*
	 * 0x20a00000: enable disconnect | simple queue tag | sense data
	 * in message; the low byte carries the CDB length (6).
	 */
	msg[6] = scsidir|0x20a00000| 6;

	mptr = msg+7;

	memset(scb, 0, sizeof(scb));
	/* Write the SCSI command into the message - always a 16 byte block */
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* allocation length */
	scb[5] = 0;
	/* Don't care about the rest of scb */

	memcpy(mptr, scb, sizeof(scb));
	mptr += 4;
	lenptr = mptr++;	/* remember the slot - filled in below */

	/* Now fill in the SG list and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02;	/* enable 64 bit SG */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	/* Send it on its way */
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n", pHba->name, rcode);
		/*
		 * On a timeout or interruption the controller may still DMA
		 * into the buffer, so it is deliberately not freed then.
		 */
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8 *)&buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8 *)&buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return;
}

static int adpt_slave_configure(struct scsi_device *device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba *pHba;

	pHba = (adpt_hba *)host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}
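
/*
 * queuecommand entry point (wrapped by DEF_SCSI_QCMD below).  It
 * validates the target device, refuses new work while the HBA is
 * resetting, and hands the command to adpt_scsi_to_i2o() for
 * conversion into an I2O SCSI_EXEC message.
 */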
static int adpt_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	adpt_hba *pHba = NULL;
	struct adpt_device *pDev = NULL;	/* dpt per-device information */

	cmd->scsi_done = done;

	/*
	 * The adapter performs REQUEST_SENSE automatically on errors, so
	 * an explicit REQUEST_SENSE is only forwarded when the sense
	 * buffer is still empty; otherwise the buffered data stands.
	 */
	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	if ((pDev = (struct adpt_device *)(cmd->device->hostdata)) == NULL) {
		/*
		 * First command to this device: look it up in our tables
		 * and cache the pointer in the scsi_device.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel,
					     (u32)cmd->device->id,
					     cmd->device->lun)) == NULL) {
			/* The device is no longer in the firmware's map. */
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If the device is being reset, fail the command rather than
	 * queueing behind the reset.
	 */
	if (pDev->state & DPTI_DEV_RESET) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads = -1;
	int sectors = -1;
	int cylinders = -1;

	/* Pick a BIOS geometry from the size (capacity is in 512-byte sectors). */
	if (capacity < 0x2000) {		/* < 4MB */
		heads = 18;
		sectors = 2;
	} else if (capacity < 0x20000) {	/* < 64MB */
		heads = 64;
		sectors = 32;
	} else if (capacity < 0x40000) {	/* < 128MB */
		heads = 65;
		sectors = 63;
	} else if (capacity < 0x80000) {	/* < 256MB */
		heads = 128;
		sectors = 63;
	} else {				/* anything larger */
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	/* Fixed geometry for CD-ROMs (SCSI device type 5) */
	if (sdev->type == 5) {
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}

static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba *pHba;

	pHba = (adpt_hba *)host->hostdata[0];
	return (char *)(pHba->detail);
}
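
/*
 * Back end for /proc/scsi/dpt_i2o/<host>: prints driver and controller
 * details followed by one entry per attached device.
 */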
static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device *d;
	int id;
	int chan;
	adpt_hba *pHba;
	int unit;

	/* Find the HBA this host belongs to */
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
		   pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
		   host->can_queue, (int)pHba->reply_fifo_size, host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for (chan = 0; chan < MAX_CHANNEL; chan++) {
		for (id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while (d) {
				seq_printf(m, "\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m, " Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
					   unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					   scsi_device_online(d->pScsi_dev) ? "online" : "offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}
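
/*
 * Command-context mapping.  The firmware echoes back a 32-bit context
 * with each reply; the command's serial number is used for it, and the
 * reverse lookup walks every device's outstanding command list.
 */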
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

static struct scsi_cmnd *
adpt_cmd_from_context(adpt_hba *pHba, u32 context)
{
	struct scsi_cmnd *cmd;
	struct scsi_device *d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if ((u32)cmd->serial_number == context) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}
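
/*
 * Ioctl replies are matched to their callers through a context too.
 * On 32-bit builds the reply pointer itself fits into the context; on
 * 64-bit builds an index into pHba->ioctl_reply_context[] is handed
 * out instead.
 */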
static u32 adpt_ioctl_to_context(adpt_hba *pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING "%s: Too many outstanding ioctl commands\n",
		       pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
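
/*
 * SCSI error-handler callbacks.  Each builds a small I2O message
 * (abort, device reset, bus reset or IOP reset) and posts it with
 * adpt_i2o_post_wait(), translating the result into SUCCESS/FAILED
 * for the SCSI midlayer.
 */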
static int adpt_abort(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba = NULL;
	struct adpt_device *dptdevice;	/* dpt per-device information */
	u32 msg[5];
	int rcode;

	if (cmd->serial_number == 0) {
		return FAILED;
	}
	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	printk(KERN_INFO "%s: Trying to Abort\n", pHba->name);
	if ((dptdevice = (void *)(cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n", pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if (rcode == -EOPNOTSUPP) {
			printk(KERN_INFO "%s: Abort cmd not supported\n", pHba->name);
			return FAILED;
		}
		printk(KERN_INFO "%s: Abort failed.\n", pHba->name);
		return FAILED;
	}
	printk(KERN_INFO "%s: Abort complete.\n", pHba->name);
	return SUCCESS;
}

#define I2O_DEVICE_RESET 0x27

static int adpt_device_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device *d = cmd->device->hostdata;

	pHba = (void *)cmd->device->host->hostdata[0];
	printk(KERN_INFO "%s: Trying to reset device\n", pHba->name);
	if (!d) {
		printk(KERN_INFO "%s: Reset Device: Device Not found\n", pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if (rcode == -EOPNOTSUPP) {
			printk(KERN_INFO "%s: Device reset not supported\n", pHba->name);
			return FAILED;
		}
		printk(KERN_INFO "%s: Device reset failed\n", pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO "%s: Device reset successful\n", pHba->name);
		return SUCCESS;
	}
}

#define I2O_HBA_BUS_RESET 0x87

static int adpt_bus_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING "%s: Bus reset: SCSI Bus %d: tid: %d\n",
	       pHba->name, cmd->device->channel,
	       pHba->channel[cmd->device->channel].tid);
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING "%s: Bus reset failed.\n", pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING "%s: Bus reset success.\n", pHba->name);
		return SUCCESS;
	}
}

static int __adpt_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	int rcode;
	char name[32];

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	/* Keep a NUL-terminated copy: the HBA may be deleted during the reset. */
	strncpy(name, pHba->name, sizeof(name) - 1);
	name[sizeof(name) - 1] = '\0';
	printk(KERN_WARNING "%s: Hba Reset: scsi id %d: tid: %d\n",
	       name, cmd->device->channel,
	       pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if (rcode == 0) {
		printk(KERN_WARNING "%s: HBA reset complete\n", name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING "%s: HBA reset failed (%x)\n", name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
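
/*
 * Full controller reset: flag the HBA as resetting, then re-run the
 * whole I2O bring-up sequence.  Any step that fails deletes the HBA,
 * so callers must not touch pHba after a non-zero return.
 */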
static int adpt_hba_reset(adpt_hba *pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	/* Activate does get status, init outbound, and get hrt */
	if ((rcode = adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n", pHba->name);

	if ((rcode = adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n", pHba->name);

	if ((rcode = adpt_i2o_lct_get(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;
}

static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO "  This could take a few minutes if there are many devices attached\n");

	/* Delete all IOPs from the controller chain; the attached
	   devices should already have been released by the OS. */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/*
	 * Free any timed-out entries still on the post-wait queue.
	 * Nothing should be outstanding this late, so losing track of
	 * waiters is not a concern here.
	 */
	for (p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
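
/*
 * Probe and map one PCI controller: enable the device, choose a DMA
 * mask, ioremap BAR0 (and BAR1 on "Raptor" boards, which keep the
 * message unit in a separate BAR), allocate the adpt_hba structure,
 * link it into hba_chain and hook up the interrupt handler.
 */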
static int adpt_install_hba(struct scsi_host_template *sht, struct pci_dev *pDev)
{
	adpt_hba *pHba = NULL;
	adpt_hba *p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;
	int raptorFlag = FALSE;

	if (pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/* See if we should enable 64-bit DMA mode. */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0) {
		pci_release_regions(pDev);	/* don't leak the BAR regions */
		return -EINVAL;
	}

	/* The adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev, 0);
	hba_map0_area_size = pci_resource_len(pDev, 0);

	/* Check if standard I2O card or Raptor (split-BAR) card */
	if (pDev->device == PCI_DPT_DEVICE_ID) {
		if (pDev->subsystem_device >= 0xc032 && pDev->subsystem_device <= 0xc03b) {
			/* RAID card: map the entire area */
			hba_map0_area_size = 0x400000;
		} else {
			if (hba_map0_area_size > 0x100000) {
				hba_map0_area_size = 0x100000;
			}
		}
	} else {
		/* Raptor card: messaging registers live in BAR1 */
		base_addr1_phys = pci_resource_start(pDev, 1);
		hba_map1_area_size = pci_resource_len(pDev, 1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/* Cap the mapped window sizes on 64-bit kernels. */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys, hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if (raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size);
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	/* Allocate and zero the data structure */
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if (hba_chain != NULL) {
		for (p = hba_chain; p->next; p = p->next)
			;
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	/* Set up the virtual base address of the I2O adapter */
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt + 0x30;
	pHba->post_port = base_addr_virt + 0x40;
	pHba->reply_port = base_addr_virt + 0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	/* Initialize the spinlocks */
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if (raptorFlag == 0) {
		printk(KERN_INFO "Adaptec I2O RAID controller %d at %p size=%x irq=%d%s\n",
		       hba_count - 1, base_addr_virt,
		       hba_map0_area_size, pDev->irq,
		       dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO "Adaptec I2O RAID controller %d irq=%d%s\n",
		       hba_count - 1, pDev->irq,
		       dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO "  BAR0 %p - size= %x\n", base_addr_virt, hba_map0_area_size);
		printk(KERN_INFO "  BAR1 %p - size= %x\n", msg_addr_virt, hba_map1_area_size);
	}

	if (request_irq(pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR "%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}

static void adpt_i2o_delete_hba(adpt_hba *pHba)
{
	adpt_hba *p1;
	adpt_hba *p2;
	struct i2o_device *d;
	struct i2o_device *next;
	int i;
	int j;
	struct adpt_device *pDev;
	struct adpt_device *pNext;

	mutex_lock(&adpt_configuration_lock);
	if (pHba->host) {
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for (p1 = hba_chain; p1; p2 = p1, p1 = p1->next) {
		if (p1 == pHba) {
			if (p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if (pHba->msg_addr_virt != pHba->base_addr_virt) {
		iounmap(pHba->msg_addr_virt);
	}
	if (pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if (pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if (pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if (pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if (pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for (d = pHba->devices; d; d = next) {
		next = d->next;
		kfree(d);
	}
	for (i = 0; i < pHba->top_scsi_channel; i++) {
		for (j = 0; j < MAX_ID; j++) {
			if (pHba->channel[i].device[j] != NULL) {
				for (pDev = pHba->channel[i].device[j]; pDev; pDev = pNext) {
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
			       MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if (hba_count <= 0) {
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device *adpt_find_device(adpt_hba *pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device *d;

	if (chan >= MAX_CHANNEL)	/* chan is unsigned, no lower-bound check needed */
		return NULL;

	d = pHba->channel[chan].device[id];
	if (!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match */
	if (d->scsi_lun == lun) {
		return d;
	}

	/* else we need to look through all the luns */
	for (d = d->next_lun; d; d = d->next_lun) {
		if (d->scsi_lun == lun) {
			return d;
		}
	}
	return NULL;
}
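
/*
 * Synchronous message posting.  adpt_i2o_post_this() copies a message
 * into a free inbound frame and rings the post port;
 * adpt_i2o_post_wait() additionally registers the caller on the
 * post-wait queue and sleeps until adpt_i2o_post_wait_complete() is
 * called from the ISR with the matching context id.
 */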
static int adpt_i2o_post_wait(adpt_hba *pHba, u32 *msg, int len, int timeout)
{
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking keeps anyone else from playing with the queue
	 * pointers and the id while we do the same.
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if ((status = adpt_i2o_post_this(pHba, msg, len)) == 0) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else {
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				/*
				 * Timed out waiting for the reply; note
				 * -ETIME so callers know the frame may
				 * still complete later.
				 */
				status = -ETIME;
			}
		}
		if (pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if (status == -ETIMEDOUT) {
		printk(KERN_INFO "dpti%d: POST WAIT TIMEOUT\n", pHba->unit);
		/*
		 * The post itself timed out; leave wait_data queued.  It is
		 * reclaimed in adpt_i2o_sys_shutdown().
		 */
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for (p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if (p1 == wait_data) {
			if (p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION) {
				status = -EOPNOTSUPP;
			}
			if (p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}

static s32 adpt_i2o_post_this(adpt_hba *pHba, u32 *data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "dpti%d: Timeout waiting for message frame!\n",
			       pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	/* post message */
	writel(m, pHba->post_port);
	wmb();

	return 0;
}

static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * Search the post-wait queue for the given context id.  If it is
	 * missing, the IOP took longer to reply than the waiter was
	 * prepared to wait and the entry has already been torn down; all
	 * we can do is log it.
	 *
	 * The lock keeps anyone from moving the queue pointers around
	 * while we are looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for (p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if (p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);

	printk(KERN_DEBUG "dpti: Could Not find task %d in wait queue\n", context);
	printk(KERN_DEBUG "      Tasks in wait queue:\n");
	for (p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG "           %d\n", p1->id);
	}
	return;
}
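
/*
 * Ask the IOP to reset itself: grab a free message frame, send
 * I2O_CMD_ADAPTER_RESET with a DMA status dword, and poll that dword
 * until the firmware reports progress or completion.
 */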
static s32 adpt_i2o_reset_hba(adpt_hba *pHba)
{
	u32 msg[8];
	u8 *status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if (pHba->initialized == FALSE) {	/* first-time reset should be quick */
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR "IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status, 0, 4);

	msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0;
	msg[6] = dma_low(addr);
	msg[7] = dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while (*status == 0) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "%s: IOP Reset Timeout\n", pHba->name);
			/*
			 * The 4-byte status block is leaked deliberately:
			 * the controller may still DMA into it after we
			 * give up waiting.
			 */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if (*status == 0x01 /* reset in progress */) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		/*
		 * Once the reset completes, the IOP hands us a free
		 * message frame; release it with a NOP reply.
		 */
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if (time_after(jiffies, timeout)) {
				printk(KERN_ERR "%s: Timeout waiting for IOP Reset.\n",
				       pHba->name);
				/* Again, deliberately leak the status block. */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);

		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if (*status == 0x02 ||
	    pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING "%s: Reset reject, trying to clear\n",
		       pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	/* Give the firmware's debug UART time to flush its output. */
	adpt_delay(20000);
#endif
	return 0;
}
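
/*
 * Walk the Logical Configuration Table the IOP returned: record bus,
 * target and LUN limits, create i2o_device/adpt_device entries for
 * every block, SCSI or FC peripheral, and note the TID of each bus
 * adapter port so resets can be routed to the right channel.
 */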
static int adpt_i2o_parse_lct(adpt_hba *pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10];
	struct adpt_device *pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n", pHba->name);
		return -1;
	}

	/* table_size is in dwords: 3 header dwords plus 9 per entry */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	for (i = 0; i < max; i++) {
		if (lct->lct_entry[i].user_tid != 0xfff) {
			/*
			 * Claimed entries (user_tid set) are devices the OS
			 * already owns; just note the addressing limits for
			 * block, SCSI and FC peripherals and move on.
			 */
			if (lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {
				continue;
			}
			tid = lct->lct_entry[i].tid;
			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) < 0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if (bus_no >= MAX_CHANNEL) {	/* something wrong, skip it */
				printk(KERN_WARNING "%s: Channel number %d out of range\n",
				       pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID) {
				printk(KERN_WARNING "%s: SCSI ID %d out of range\n",
				       pHba->name, scsi_id);
				continue;
			}
			if (bus_no > pHba->top_scsi_channel) {
				pHba->top_scsi_channel = bus_no;
			}
			if (scsi_id > pHba->top_scsi_id) {
				pHba->top_scsi_id = scsi_id;
			}
			if (scsi_lun > pHba->top_scsi_lun) {
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if (d == NULL) {
			printk(KERN_CRIT "%s: Out of memory for I2O device data.\n",
			       pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for (d = pHba->devices; d; d = d->next) {
		if (d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		    d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT) {
			tid = d->lct_data.tid;
			/* Bus adapter ports are assumed to appear in channel order. */
			if (bus_no > pHba->top_scsi_channel) {
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if (adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28) >= 0) {
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			bus_no++;
			if (bus_no >= MAX_CHANNEL) {	/* something wrong, skip it */
				printk(KERN_WARNING "%s: Channel number %d out of range - LCT\n",
				       pHba->name, bus_no);
				break;
			}
		}
	}

	/* Set up the adpt_device table */
	for (d = pHba->devices; d; d = d->next) {
		if (d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {

			tid = d->lct_data.tid;
			scsi_id = -1;

			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) >= 0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if (bus_no >= MAX_CHANNEL) {	/* something wrong, skip it */
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if (pHba->channel[bus_no].device[scsi_id] == NULL) {
					pDev = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if (pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for (pDev = pHba->channel[bus_no].device[scsi_id];
					     pDev->next_lun; pDev = pDev->next_lun)
						;
					pDev->next_lun = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if (pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0]) & 0xff;
				pDev->flags = (buf[0]>>8) & 0xff;
				if (scsi_id > pHba->top_scsi_id) {
					pHba->top_scsi_id = scsi_id;
				}
				if (scsi_lun > pHba->top_scsi_lun) {
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if (scsi_id == -1) {
				printk(KERN_WARNING "Could not find SCSI ID for %s\n",
				       d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}

/*
 * Add a new info structure to the head of an HBA's device list.
 */
static int adpt_i2o_install_device(adpt_hba *pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller = pHba;
	d->owner = NULL;
	d->next = pHba->devices;
	d->prev = NULL;
	if (pHba->devices != NULL) {
		pHba->devices->prev = d;
	}
	pHba->devices = d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba *pHba;

	mutex_lock(&adpt_mutex);
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba *pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}
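
/*
 * I2OUSRCMD: pass a raw I2O message from a management application to
 * the firmware.  User SG elements are bounced through coherent DMA
 * buffers on the way in, and the data plus the reply frame are copied
 * back out when the command completes.
 */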
static int adpt_i2o_passthru(adpt_hba *pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32 *reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user *user_reply = NULL;
	void **sg_list = NULL;
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	if (get_user(size, &user_msg[0])) {
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if (size > MAX_MESSAGE_SIZE) {
		return -EFAULT;
	}
	size *= 4;	/* convert to bytes */

	/* Copy in the user's I2O command */
	if (copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	if (get_user(reply_size, &user_reply[0])) {
		return -EFAULT;
	}
	reply_size = reply_size>>16;
	if (reply_size > REPLY_FRAME_SIZE) {
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if (reply == NULL) {
		printk(KERN_WARNING "%s: Could not allocate reply buffer\n", pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000;	/* ioctl context */
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if (sg_offset) {
		/* TODO: 64-bit SG elements are not handled here. */
		struct sg_simple_element *sg = (struct sg_simple_element *)(msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize) {
			printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
			       pHba->name, sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for (i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /* simple address element */)) {
				printk(KERN_DEBUG "%s:Bad SG element %d - not simple (%x)\n",
				       pHba->name, i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if (!p) {
				printk(KERN_DEBUG "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
				       pHba->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p;	/* sg_list is indexed by the input frame */
			/* Copy in the user's SG buffer if necessary */
			if (sg[i].flag_count & 0x04000000 /* direction: out */) {
				if (copy_from_user(p, (void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG "%s: Could not copy SG buf %d FROM user\n",
					       pHba->name, i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Keep new SCSI commands out of the controller while
		 * this message is in flight.
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk(KERN_WARNING "adpt_i2o_passthru: post wait failed %d %p\n",
			       rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if (rcode) {
		goto cleanup;
	}

	if (sg_offset) {
		/* Copy back the SG buffers; re-fetch the user message
		   since the in-kernel copy was consumed by the post. */
		u32 j;
		struct sg_simple_element *sg;
		int sg_size;

		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		if (get_user(size, &user_msg[0])) {
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		size *= 4;
		/* Copy in the user's I2O command */
		if (copy_from_user(msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		sg = (struct sg_simple_element *)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG buffer if the direction flag says we wrote to it */
			if (!(sg[j].flag_count & 0x04000000 /* direction: out */)) {
				sg_size = sg[j].flag_count & 0xffffff;
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,
						 sg_list[j], sg_size)) {
					printk(KERN_WARNING "%s: Could not copy %p TO user %x\n",
					       pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		/* The reply frame carries the original message context words */
		if (copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING "%s: Could not copy message context FROM user\n",
			       pHba->name);
			rcode = -EFAULT;
		}
		if (copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING "%s: Could not copy reply TO user\n", pHba->name);
			rcode = -EFAULT;
		}
	}

cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
			(struct sg_simple_element *)(msg + sg_offset);
		while (sg_index) {
			if (sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S *si)
{
	/* This is all the info we need for now. */
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S *si)
{
	/* This is all the info we need for now. */
	si->processorType = PROC_ULTRASPARC;
}
#endif

#if defined __alpha__
static void adpt_alpha_info(sysInfo_S *si)
{
	/* This is all the info we need for now. */
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S *si)
{
	/* This is all the info we need for now. */
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:	/* just in case */
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

/*
 * Fill in and copy out the sysInfo structure used by the DPT
 * management tools.
 */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined(__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined(__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))) {
		printk(KERN_WARNING "dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
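
/*
 * Control-node ioctl dispatcher.  Resolves the HBA from the device
 * minor, waits out any reset in progress, then handles the DPT
 * management commands (signature, controller info, system info, blink
 * LED, passthrough, reset and rescan).
 */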
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba *pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	while ((volatile u32)pHba->state & DPTI_STATE_RESET)
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO: {
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong)pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum = PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if (copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))) {
			printk(KERN_WARNING "%s: Could not copy HbaInfo TO user\n",
			       pHba->name);
			return -EFAULT;
		}
		break;
	}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED: {
		u32 value;

		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
	}
	case I2ORESETCMD: {
		struct Scsi_Host *shost = pHba->host;

		if (shost)
			spin_lock_irqsave(shost->host_lock, flags);
		adpt_hba_reset(pHba);
		if (shost)
			spin_unlock_irqrestore(shost->host_lock, flags);
		break;
	}
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}

static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch (cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif
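
/*
 * Interrupt handler: drain the outbound reply FIFO.  Each reply's
 * context word says whether it belongs to an ioctl passthrough
 * (0x40000000), a post-wait sleeper (0x80000000) or a normal SCSI
 * command, and the frame is handed back to the IOP when done.
 */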
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd *cmd;
	adpt_hba *pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status = 0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL) {
		printk(KERN_WARNING "adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if (pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while (readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if (m == EMPTY_QUEUE) {
			/* Try twice then give up */
			rmb();
			m = readl(pHba->reply_port);
			if (m == EMPTY_QUEUE) {
				/* This really should not happen */
				printk(KERN_ERR "dpti: Could not get reply frame\n");
				goto out;
			}
		}
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
					(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n", pHba->name);
			if (old_m >= 0x100000) {
				printk(KERN_ERR "%s: Bad preserved MFA (%x)- dropping frame\n",
				       pHba->name, old_m);
				writel(m, pHba->reply_port);
				continue;
			}
			/* Transaction context is 0 in a failed reply frame */
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if (context & 0x40000000) {	/* ioctl */
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if (p != NULL)
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			/* All ioctls will also be post-wait */
		}
		if (context & 0x80000000) {	/* post-wait message */
			status = readl(reply+16);
			if (status >> 24) {
				status &= 0xffff;	/* get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if (!(context & 0x40000000)) {
				cmd = adpt_cmd_from_context(pHba,
							readl(reply+12));
				if (cmd != NULL) {
					printk(KERN_WARNING "%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n",
					       pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else {	/* SCSI message */
			cmd = adpt_cmd_from_context(pHba, readl(reply+12));
			if (cmd != NULL) {
				scsi_dma_unmap(cmd);
				if (cmd->serial_number != 0) {	/* if not timed out */
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:
	if (pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
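
/*
 * Convert a scsi_cmnd into a DPT private I2O SCSI_EXEC message:
 * target TID, context, CDB, then a scatter-gather list (with 64-bit
 * elements when the HBA supports them), and post it to the IOP.
 */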
static s32 adpt_scsi_to_i2o(adpt_hba *pHba, struct scsi_cmnd *cmd, struct adpt_device *d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32 *mptr;
	u32 *lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0, sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;	/* DATA NO XFER */
	if (len) {
		/*
		 * Set SCB flags to indicate if data is being transferred
		 * in or out, or no data transfer.
		 */
		switch (cmd->sc_data_direction) {
		case DMA_FROM_DEVICE:
			scsidir = 0x40000000;	/* DATA IN  (iop<--dev) */
			break;
		case DMA_TO_DEVICE:
			direction = 0x04000000;	/* SGL OUT */
			scsidir = 0x80000000;	/* DATA OUT (iop-->dev) */
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir = 0x40000000;	/* DATA IN  (iop<--dev) */
			/* assume in, and continue */
			break;
		default:
			printk(KERN_WARNING "%s: scsi opcode 0x%x not supported.\n",
			       pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK << 16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	/* msg[0] is set after the SG list length is known */
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	msg[3] = adpt_cmd_to_context(cmd);	/* we want the SCSI control block back */

	/* Adaptec/DPT private command */
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/*
	 * 0x20a00000: enable disconnect | simple queue tag | sense data
	 * in message; the low byte carries the CDB length.
	 */
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr = msg+7;

	/* Write the SCSI command into the message - always a 16 byte block */
	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr += 4;
	lenptr = mptr++;	/* remember the slot - filled in below */
	if (dpt_dma64(pHba)) {
		reqlen = 16;	/* single SGE */
		*mptr++ = (0x7C<<24)+(2<<16)+0x02;	/* enable 64 bit SG */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;	/* single SGE */
	}

	/* Now fill in the SG list and command */
	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len += sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make the last element an end of list */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if (cmd->underflow && len != cmd->underflow) {
			printk(KERN_WARNING "Cmd len %08X Cmd underflow %08X\n",
			       len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	/* Send it on its way */
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}

static s32 adpt_scsi_host_alloc(adpt_hba *pHba, struct scsi_host_template *sht)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(sht, sizeof(adpt_hba *));
	if (host == NULL) {
		printk(KERN_WARNING "%s: scsi_host_alloc returned NULL\n", pHba->name);
		return -1;
	}
	host->hostdata[0] = (unsigned long)pHba;
	pHba->host = host;

	host->irq = pHba->pDev->irq;
	/* no I/O ports, so host->io_port and host->n_io_port stay zero */
	host->io_port = 0;
	host->n_io_port = 0;
	/* see comments in scsi_host.h */
	host->max_id = 16;
	host->max_lun = 256;
	host->max_channel = pHba->top_scsi_channel + 1;
	host->cmd_per_lun = 1;
	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
	host->sg_tablesize = pHba->sg_tablesize;
	host->can_queue = pHba->post_fifo_size;
	host->use_cmd_list = 1;

	return 0;
}
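
/*
 * Completion path: translate the detailed status in an I2O reply frame
 * into a SCSI midlayer result, copy out any autosense data, and call
 * scsi_done.
 */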
2379
2380
static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00;

	u16 detailed_status = readl(reply + 16) & 0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply + 20));

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';

	if (!(reply_flags & MSG_FAIL)) {
		switch (detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			if (readl(reply + 20) < cmd->underflow) {
				cmd->result = (DID_ERROR << 16);
				printk(KERN_WARNING "%s: SCSI CMD underflow\n", pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING "%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
			       pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING "%s: SCSI CMD parity error\n", pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING "%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
			       pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			       hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}
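
		/*
		 * On CHECK CONDITION, copy the auto-sense data carried in
		 * the reply frame into the command's sense buffer (the
		 * copy is capped at 40 bytes).
		 */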
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);

			memcpy_fromio(cmd->sense_buffer, (reply + 28), len);
			if (cmd->sense_buffer[0] == 0x70 &&
			    cmd->sense_buffer[2] == DATA_PROTECT) {
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING "%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				       pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
				       hba_status, dev_status, cmd->cmnd[0]);
			}
		}
	} else {
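		/*
		 * The IOP could not deliver the request at all (MSG_FAIL);
		 * complete the command as a timeout so the mid-layer's
		 * error handling takes over.
		 */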
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING "%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
		       pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
		       ((struct adpt_device *)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	cmd->result |= (dev_status);

	if (cmd->scsi_done != NULL) {
		cmd->scsi_done(cmd);
	}
	return cmd->result;
}

static s32 adpt_rescan(adpt_hba *pHba)
{
	s32 rcode;
	ulong flags = 0;

	if (pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);
	if ((rcode = adpt_i2o_lct_get(pHba)) < 0)
		goto out;
	if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0)
		goto out;
	rcode = 0;
out:
	if (pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return rcode;
}

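/*
 * Re-walk a freshly read LCT: create entries for new devices, bring
 * reappearing devices back online, and mark vanished ones offline.
 */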
static s32 adpt_i2o_reparse_lct(adpt_hba *pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10];
	struct adpt_device *pDev = NULL;
	struct i2o_device *pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty\n", pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for (d = pHba->devices; d; d = d->next) {
		pDev = (struct adpt_device *)d->owner;
		if (!pDev) {
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name, max);

	for (i = 0; i < max; i++) {
		if (lct->lct_entry[i].user_tid != 0xfff) {
			continue;
		}

		if (lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {
			tid = lct->lct_entry[i].tid;
			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) < 0) {
				printk(KERN_ERR "%s: Could not query device\n", pHba->name);
				continue;
			}
			bus_no = buf[0] >> 16;
			if (bus_no >= MAX_CHANNEL) {
				printk(KERN_WARNING
				       "%s: Channel number %d out of range\n",
				       pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			pDev = pHba->channel[bus_no].device[scsi_id];

			while (pDev) {
				if (pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
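			/*
			 * LUN not seen before: allocate and install a new
			 * i2o_device plus a per-LUN adpt_device entry.
			 */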
			if (!pDev) {
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if (d == NULL) {
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				pDev = pHba->channel[bus_no].device[scsi_id];
				if (pDev == NULL) {
					pDev = kzalloc(sizeof(struct adpt_device),
						       GFP_ATOMIC);
					if (pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
						kzalloc(sizeof(struct adpt_device),
							GFP_ATOMIC);
					if (pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0]) & 0xff;
				pDev->flags = (buf[0] >> 8) & 0xff;

				if (scsi_id > pHba->top_scsi_id) {
					pHba->top_scsi_id = scsi_id;
				}
				if (scsi_lun > pHba->top_scsi_lun) {
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			}

			while (pDev) {
				if (pDev->scsi_lun == scsi_lun) {
					if (!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING "%s: Setting device (%d,%d,%llu) back online\n",
						       pHba->name, bus_no, scsi_id, scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if (d->lct_data.tid != tid) {
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}

					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
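
	/*
	 * Anything still flagged UNSCANNED was absent from the new LCT;
	 * take it offline.
	 */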
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev = (struct adpt_device *)pI2o_dev->owner;
		if (!pDev) {
			continue;
		}
		if (pDev->state & DPTI_DEV_UNSCANNED) {
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING "%s: Device (%d,%d,%llu) offline\n",
			       pHba->name, pDev->scsi_channel, pDev->scsi_id, pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}

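/*
 * Complete every command still queued on a device with QUEUE_FULL
 * status so the mid-layer requeues it, e.g. when a controller reset has
 * discarded the posted commands.
 */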
static void adpt_fail_posted_scbs(adpt_hba *pHba)
{
	struct scsi_cmnd *cmd = NULL;
	struct scsi_device *d = NULL;

	shost_for_each_device(d, pHba->host) {
		unsigned long flags;

		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (cmd->serial_number == 0) {
				continue;
			}
			cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
			cmd->scsi_done(cmd);
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
}

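/*===========================================================================
 * I2O subsystem routines
 *===========================================================================
 */

/*
 * Bring the IOP to a state where it can accept work: reset it if it is
 * unresponsive or in a bad state, then set up the outbound reply queue
 * and fetch the hardware resource table.
 */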
static int adpt_i2o_activate_hba(adpt_hba *pHba)
{
	int rcode;

	if (pHba->initialized) {
		if (adpt_i2o_status_get(pHba) < 0) {
			if ((rcode = adpt_i2o_reset_hba(pHba)) != 0) {
				printk(KERN_WARNING "%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 ||
			    pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		if ((rcode = adpt_i2o_reset_hba(pHba)) != 0) {
			printk(KERN_WARNING "%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}
	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}

static int adpt_i2o_online_hba(adpt_hba *pHba)
{
	if (adpt_i2o_systab_send(pHba) < 0)
		return -1;

	if (adpt_i2o_enable_hba(pHba) < 0)
		return -1;

	return 0;
}

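/*
 * Hand an inbound message frame we cannot use back to the IOP by
 * sending a UtilNOP through it.
 */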
static s32 adpt_send_nop(adpt_hba *pHba, u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5 * HZ;

	while (m == EMPTY_QUEUE) {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n", pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0, &msg[1]);
	writel(0, &msg[2]);
	wmb();

	writel(m, pHba->post_port);
	wmb();
	return 0;
}

static s32 adpt_i2o_init_outbound_q(adpt_hba *pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND * HZ;
	u32 m;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "%s: Timeout waiting for message frame\n", pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING "%s: IOP reset failed - no free memory.\n",
		       pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

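	/*
	 * ExecOutboundInit: declare a 4096-byte host page frame size and
	 * the outbound (reply) frame size, with a simple SG element
	 * pointing at the 4-byte status word the IOP updates as the
	 * initialization proceeds.
	 */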
	writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);
	writel(4096, &msg[4]);
	writel((REPLY_FRAME_SIZE) << 16 | 0x80, &msg[5]);
	writel(0xD0000004, &msg[6]);
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	do {
		if (*status) {
			if (*status != 0x01) {
				break;
			}
		}
		rmb();
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "%s: Timeout Initializing\n", pHba->name);
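			/*
			 * Deliberately leak the 4-byte status buffer: the
			 * IOP may still DMA into it after the timeout, so
			 * it cannot safely be freed here.
			 */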
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	if (*status != 0x04) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	if (pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
				  pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				  pHba->reply_pool, pHba->reply_pool_pa);
	}
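
	/*
	 * Allocate the reply frame pool and hand each frame's bus address
	 * to the IOP to prime the outbound FIFO.
	 */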
	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
					      pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
					      &pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0, pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

	for (i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
		       pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}

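/*
 * Fetch the IOP status block with ExecStatusGet, then derive the queue
 * depths and SG table size from what it reports.
 */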
static s32 adpt_i2o_status_get(adpt_hba *pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block = NULL;

	if (pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
							sizeof(i2o_status_block),
							&pHba->status_block_pa, GFP_KERNEL);
		if (pHba->status_block == NULL) {
			printk(KERN_ERR
			       "dpti%d: Get Status Block failed; Out of memory.\n",
			       pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8 *)(pHba->status_block);
	timeout = jiffies + TMOUT_GETSTATUS * HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
			       pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);

	writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel(dma_low(pHba->status_block_pa), &msg[6]);
	writel(dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]);

	writel(m, pHba->post_port);
	wmb();

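	/*
	 * The IOP DMAs the status block back and sets its final byte
	 * (offset 87) to 0xff when the transfer is complete.
	 */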
	while (status_block[87] != 0xff) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "dpti%d: Get status timeout.\n",
			       pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

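	/*
	 * Work out how many SG elements fit in an inbound frame; 64-bit
	 * frames lose two extra dwords to the SGL attributes element plus
	 * one dword per element for the high address word.
	 */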
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
			    - 14 * sizeof(u32))
			   / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
			    - 12 * sizeof(u32))
			   / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}

#ifdef DEBUG
	printk("dpti%d: State = ", pHba->unit);
	switch (pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n", pHba->status_block->iop_state);
	}
#endif
	return 0;
}

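/*
 * Read the IOP's Logical Configuration Table, growing the buffer and
 * retrying until the whole table fits.
 */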
static int adpt_i2o_lct_get(adpt_hba *pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)) {
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
						       pHba->lct_size, &pHba->lct_pa,
						       GFP_ATOMIC);
			if (pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
				       pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;
		msg[5] = 0x00000000;
		msg[6] = 0xD0000000 | pHba->lct_size;
		msg[7] = (u32)pHba->lct_pa;

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
			       pHba->name, ret);
			printk(KERN_ERR "Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			pHba->lct_size = pHba->lct->table_size << 2;
			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
					  pHba->lct, pHba->lct_pa);
			pHba->lct = NULL;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Logical configuration table read.\n", pHba->name);

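	/*
	 * Query the DPT-private scalar group 0x8000 for the firmware debug
	 * buffer and map it so the blink-LED value and debug flags can be
	 * read.
	 */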
	if (adpt_i2o_query_scalar(pHba, 0, 0x8000, -1, buf, sizeof(buf)) >= 0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
						pHba->FwDebugBufferSize);
		if (pHba->FwDebugBuffer_P) {
			pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
					       FW_DEBUG_FLAGS_OFFSET;
			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
						   FW_DEBUG_BLED_OFFSET;
			pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
						   FW_DEBUG_STR_LENGTH_OFFSET;
			pHba->FwDebugBuffer_P += buf[2];
			pHba->FwDebugFlags = 0;
		}
	}

	return 0;
}

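/*
 * Build the I2O system table describing every IOP in the system; it is
 * later pushed to each IOP via ExecSysTabSet.
 */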
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba *pHba = hba_chain;
	int count = 0;

	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
				  sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +
		      (hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				     sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;

		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue;
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1;
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32 *)sys_tbl;

	printk(KERN_DEBUG "sys_tbl_len=%d in 32bit words\n", (sys_tbl_len >> 2));
	for (count = 0; count < (sys_tbl_len >> 2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
		       count, table[count]);
	}
}
#endif

	return 0;
}

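/*
 * Print the TID plus the vendor, device and revision strings for one
 * I2O unit, read from its Device Identity parameter group (0xF100).
 */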
static void adpt_i2o_report_hba_unit(adpt_hba *pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16) >= 0) {
		buf[16] = 0;
		printk(" Vendor: %-12.12s", buf);
	}
	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16) >= 0) {
		buf[16] = 0;
		printk(" Device: %-12.12s", buf);
	}
	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8) >= 0) {
		buf[8] = 0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if (d->lct_data.device_flags & (1 << 0))
		printk("C");
	if (d->lct_data.device_flags & (1 << 1))
		printk("U");
	if (!(d->lct_data.device_flags & (1 << 4)))
		printk("P");
	if (!(d->lct_data.device_flags & (1 << 5)))
		printk("M");
	printk("\n");
#endif
}

#ifdef DEBUG
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch (class & 0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif

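/*
 * Read the Hardware Resource Table, growing the buffer and retrying
 * until the IOP's full table fits.
 */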
static s32 adpt_i2o_hrt_get(adpt_hba *pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
						       size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0] = SIX_WORD_MSG_SIZE | SGL_OFFSET_4;
		msg[1] = I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = (0xD0000000 | size);
		msg[5] = (u32)pHba->hrt_pa;

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;

			dma_free_coherent(&pHba->pDev->dev, size,
					  pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while (pHba->hrt == NULL);
	return 0;
}

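/*
 * Read one field of a scalar parameter group from a device into buf;
 * a field of -1 fetches the whole group. Returns buflen on success or
 * a negative error code on failure.
 */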
static int adpt_i2o_query_scalar(adpt_hba *pHba, int tid,
				 int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;
	int size;

	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
				       sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
				      sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
				  resblk_va, resblk_pa);
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
		       pHba->name);
		return -ENOMEM;
	}
	if (field == -1)
		opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
				     opblk_va, opblk_pa, sizeof(opblk),
				     resblk_va, resblk_pa, sizeof(u8) * (8 + buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
				  resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		return -ETIME;
	} else if (size == -EINTR) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
				  resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		return -EINTR;
	}

	memcpy(buf, resblk_va + 8, buflen);

	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
			  resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}

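/*
 * Post a UTIL_PARAMS_GET/SET, with the operation and result blocks
 * passed by bus address. Returns the size in bytes of the result block
 * on success, or the negated BlockStatus on error.
 */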
static int adpt_i2o_issue_params(int cmd, adpt_hba *pHba, int tid,
				 void *opblk_va, dma_addr_t opblk_pa, int oplen,
				 void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk(KERN_ERR "adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;
	}

	if (res[1] & 0x00FF0000) {
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
		       "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
		       pHba->name,
		       (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							: "PARAMS_GET",
		       res[1] >> 24, (res[1] >> 16) & 0xFF, res[1] & 0xFFFF);
		return -((res[1] >> 16) & 0xFF);
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2);
}

static s32 adpt_i2o_quiesce_hba(adpt_hba *pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	if ((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	    (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)) {
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO "dpti%d: Unable to quiesce (status=%#x).\n",
		       pHba->unit, -ret);
	} else {
		printk(KERN_INFO "dpti%d: Quiesced.\n", pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}

static int adpt_i2o_enable_hba(adpt_hba *pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if (!pHba->status_block) {
		return -ENOMEM;
	}

	if (pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if (pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING "%s: Could not enable (status=%#10x).\n",
		       pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}

static int adpt_i2o_systab_send(adpt_hba *pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0 << 16) | ((pHba->unit + 2) << 12);
	msg[5] = 0;
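
	/*
	 * Provide three SGL elements: the system table itself, then empty
	 * private-memory and private-I/O space declarations.
	 */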
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
		       pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}

#ifdef UARTDELAY

static void adpt_delay(int millisec)
{
	int i;

	for (i = 0; i < millisec; i++) {
		udelay(1000);
	}
}

#endif

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "dpt_i2o",
	.proc_name = "dpt_i2o",
	.show_info = adpt_show_info,
	.info = adpt_info,
	.queuecommand = adpt_queue,
	.eh_abort_handler = adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler = adpt_bus_reset,
	.eh_host_reset_handler = adpt_reset,
	.bios_param = adpt_bios_param,
	.slave_configure = adpt_slave_configure,
	.can_queue = MAX_TO_IOP_MESSAGES,
	.this_id = 7,
	.use_clustering = ENABLE_CLUSTERING,
};

static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *next;

	printk(KERN_INFO "Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}

static void __exit adpt_exit(void)
{
	adpt_hba *pHba, *next;

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba);
	}
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");