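/*
 * rx.c
 *
 * Rx hardware interface support for the aacraid driver: register access,
 * synchronous mailbox commands, interrupt handling, FIB delivery, and
 * adapter bring-up.
 */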
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>

#include <scsi/scsi_host.h>

#include "aacraid.h"

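/**
 *	aac_rx_intr_producer	-	producer/consumer interrupt handler
 *	@irq: interrupt number (unused)
 *	@dev_id: adapter raising the interrupt
 *
 *	Service doorbell interrupts while the adapter is using the legacy
 *	producer/consumer comm queues: printf messages, command and response
 *	queue notifications.
 */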
static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
{
	struct aac_dev *dev = dev_id;
	unsigned long bellbits;
	u8 intstat = rx_readb(dev, MUnit.OISR);

	/*
	 *	Only service interrupt sources that are not masked off in
	 *	OIMR; if nothing unmasked is pending, the interrupt is not ours.
	 */
	if (likely(intstat & ~(dev->OIMR))) {
		bellbits = rx_readl(dev, OutboundDoorbellReg);
		if (unlikely(bellbits & DoorBellPrintfReady)) {
			aac_printf(dev, readl(&dev->IndexRegs->Mailbox[5]));
			rx_writel(dev, MUnit.ODR, DoorBellPrintfReady);
			rx_writel(dev, InboundDoorbellReg, DoorBellPrintfDone);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
		}
		else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
			/* Acknowledge both not-full doorbells */
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
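
/**
 *	aac_rx_intr_message	-	new comm interface interrupt handler
 *	@irq: interrupt number (unused)
 *	@dev_id: adapter raising the interrupt
 *
 *	Drain the adapter's outbound queue, handing each completed index to
 *	aac_intr_normal() until the queue reads back empty (0xFFFFFFFF).
 */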
static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
{
	int isAif, isFastResponse, isSpecial;
	struct aac_dev *dev = dev_id;
	u32 Index = rx_readl(dev, MUnit.OutboundQueue);
	if (unlikely(Index == 0xFFFFFFFFL))
		Index = rx_readl(dev, MUnit.OutboundQueue);
	if (likely(Index != 0xFFFFFFFFL)) {
		do {
			isAif = isFastResponse = isSpecial = 0;
			if (Index & 0x00000002L) {
				/* Bit 1 flags an AIF; 0xFFFFFFFE needs no further handling */
				isAif = 1;
				if (Index == 0xFFFFFFFEL)
					isSpecial = 1;
				Index &= ~0x00000002L;
			} else {
				/* Bit 0 flags a fast response */
				if (Index & 0x00000001L)
					isFastResponse = 1;
				Index >>= 2;
			}
			if (!isSpecial) {
				if (unlikely(aac_intr_normal(dev,
						Index, isAif,
						isFastResponse, NULL))) {
					/* Not handled: push the entry back and ring the doorbell */
					rx_writel(dev,
						MUnit.OutboundQueue,
						Index);
					rx_writel(dev,
						MUnit.ODR,
						DoorBellAdapterNormRespReady);
				}
			}
			Index = rx_readl(dev, MUnit.OutboundQueue);
		} while (Index != 0xFFFFFFFFL);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
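
/**
 *	aac_rx_disable_interrupt	-	Disable interrupts
 *	@dev: Adapter
 */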
static void aac_rx_disable_interrupt(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
}
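
/**
 *	aac_rx_enable_interrupt_producer	-	Enable interrupts (producer mode)
 *	@dev: Adapter
 */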
static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
}
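
/**
 *	aac_rx_enable_interrupt_message	-	Enable interrupts (message mode)
 *	@dev: Adapter
 */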
static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
}
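
/**
 *	rx_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1..@p6: command parameters (only p1-p4 are passed to the adapter)
 *	@status: adapter status returned from Mailbox 0
 *	@r1..@r4: additional return values from Mailboxes 1-4
 *
 *	Send a synchronous command to the adapter and wait for its
 *	completion.
 */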
static int rx_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
	unsigned long start;
	int ok;
	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 4
	 *	(p5 and p6 are not used by this interface)
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);
	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Disable doorbell interrupts
	 */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	rx_readb(dev, MUnit.OIMR);
	/*
	 *	Signal that there is a new synch command
	 */
	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);

	ok = 0;
	start = jiffies;

	/*
	 *	Wait up to 30 seconds
	 */
	while (time_before(jiffies, start+30*HZ))
	{
		udelay(5);
		/*
		 *	The adapter sets doorbell 0 when it has completed
		 *	the command.
		 */
		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
			/*
			 *	Clear the doorbell.
			 */
			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
			ok = 1;
			break;
		}
		/*
		 *	Yield the processor in case we are slow
		 */
		msleep(1);
	}
	if (unlikely(ok != 1)) {
		/*
		 *	Restore interrupt mask even though we timed out
		 */
		aac_adapter_enable_int(dev);
		return -ETIMEDOUT;
	}
	/*
	 *	Pull the synch status from Mailbox 0.
	 */
	if (status)
		*status = readl(&dev->IndexRegs->Mailbox[0]);
	if (r1)
		*r1 = readl(&dev->IndexRegs->Mailbox[1]);
	if (r2)
		*r2 = readl(&dev->IndexRegs->Mailbox[2]);
	if (r3)
		*r3 = readl(&dev->IndexRegs->Mailbox[3]);
	if (r4)
		*r4 = readl(&dev->IndexRegs->Mailbox[4]);
	/*
	 *	Clear the synch command doorbell.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}
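
/**
 *	aac_rx_interrupt_adapter	-	interrupt the adapter
 *	@dev: Adapter
 *
 *	Issue a synchronous BREAKPOINT_REQUEST command to the adapter.
 */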
static void aac_rx_interrupt_adapter(struct aac_dev *dev)
{
	rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
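
/**
 *	aac_rx_notify_adapter	-	send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Ring the inbound doorbell that corresponds to the queue event.
 */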
static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_1);
		break;
	case HostNormRespNotFull:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_4);
		break;
	case AdapNormRespQue:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_2);
		break;
	case HostNormCmdNotFull:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_3);
		break;
	case HostShutdown:
		break;
	case FastIo:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_6);
		break;
	case AdapPrintfDone:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_5);
		break;
	default:
		BUG();
		break;
	}
}
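
/**
 *	aac_rx_start_adapter	-	activate adapter
 *	@dev: Adapter
 *
 *	Hand the init structure to the adapter and start it processing
 *	requests.
 */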
static void aac_rx_start_adapter(struct aac_dev *dev)
{
	struct aac_init *init;

	init = dev->init;
	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
	/* We can only pass a 32 bit address here */
	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
		0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
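
/**
 *	aac_rx_check_health
 *	@dev: device to check if healthy
 *
 *	Determine whether the specified adapter is alive and well; returns
 *	0 when healthy, a negative value on error, or the adapter's own
 *	panic/POST code when one can be recovered.
 */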
static int aac_rx_check_health(struct aac_dev *dev)
{
	u32 status = rx_readl(dev, MUnit.OMRx[0]);

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		return -1;
	/*
	 *	Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC)) {
		char *buffer;
		struct POSTSTATUS {
			__le32 Post_Command;
			__le32 Post_Address;
		} *post;
		dma_addr_t paddr, baddr;
		int ret;

		/* The panic code may be reported directly in the status word */
		if (likely((status & 0xFF000000L) == 0xBC000000L))
			return (status >> 16) & 0xFF;
		buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
		ret = -2;
		if (unlikely(buffer == NULL))
			return ret;
		post = pci_alloc_consistent(dev->pdev,
			sizeof(struct POSTSTATUS), &paddr);
		if (unlikely(post == NULL)) {
			pci_free_consistent(dev->pdev, 512, buffer, baddr);
			return ret;
		}
		memset(buffer, 0, 512);
		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
		post->Post_Address = cpu_to_le32(baddr);
		rx_writel(dev, MUnit.IMRx[0], paddr);
		rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
			post, paddr);
		if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
			ret = (hex_to_bin(buffer[2]) << 4) +
				hex_to_bin(buffer[3]);
		}
		pci_free_consistent(dev->pdev, 512, buffer, baddr);
		return ret;
	}
	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 *	Everything is OK
	 */
	return 0;
}
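
/**
 *	aac_rx_deliver_producer
 *	@fib: fib to issue
 *
 *	Queue a fib on the adapter's normal command queue, returning 0 if
 *	successful.
 */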
int aac_rx_deliver_producer(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 Index;
	unsigned long nointr = 0;

	aac_queue_get(dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);

	atomic_inc(&q->numpending);
	*(q->headers.producer) = cpu_to_le32(Index + 1);
	if (!(nointr & aac_config.irq_mod))
		aac_adapter_notify(dev, AdapNormCmdQueue);

	return 0;
}
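
/**
 *	aac_rx_deliver_message
 *	@fib: fib to issue
 *
 *	Hand a fib to the adapter through the message (new comm) inbound
 *	queue, returning 0 if successful.
 */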
static int aac_rx_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 Index;
	u64 addr;
	volatile void __iomem *device;

	unsigned long count = 10000000L;
	atomic_inc(&q->numpending);
	for(;;) {
		Index = rx_readl(dev, MUnit.InboundQueue);
		if (unlikely(Index == 0xFFFFFFFFL))
			Index = rx_readl(dev, MUnit.InboundQueue);
		if (likely(Index != 0xFFFFFFFFL))
			break;
		if (--count == 0) {
			atomic_dec(&q->numpending);
			return -ETIMEDOUT;
		}
		udelay(5);
	}
	/* Fill in the inbound queue entry: 64-bit FIB address, then its size */
	device = dev->base + Index;
	addr = fib->hw_fib_pa;
	writel((u32)(addr & 0xffffffff), device);
	device += sizeof(u32);
	writel((u32)(addr >> 32), device);
	device += sizeof(u32);
	writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
	rx_writel(dev, MUnit.InboundQueue, Index);
	return 0;
}
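
/**
 *	aac_rx_ioremap
 *	@dev: adapter
 *	@size: mapping resize request
 *
 *	Map (or, when @size is zero, unmap) the adapter register space.
 */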
static int aac_rx_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.rx);
		return 0;
	}
	dev->base = dev->regs.rx = ioremap(dev->base_start, size);
	if (dev->base == NULL)
		return -1;
	dev->IndexRegs = &dev->regs.rx->IndexRegs;
	return 0;
}
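
/**
 *	aac_rx_restart_adapter	-	reset and restart the adapter
 *	@dev: Adapter
 *	@bled: health/panic status from aac_rx_check_health(), or 0
 *
 *	Attempt an IOP reset and wait for the adapter to come back up;
 *	returns 0 on success or a negative value on failure.
 */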
static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
{
	u32 var = 0;

	if (!(dev->supplement_adapter_info.SupportedOptions2 &
	    AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
		if (bled)
			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
				dev->name, dev->id, bled);
		else {
			bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
				0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
			if (!bled && (var != 0x00000001) && (var != 0x3803000F))
				bled = -EINVAL;
		}
		if (bled && (bled != -ETIMEDOUT))
			bled = aac_adapter_sync_cmd(dev, IOP_RESET,
				0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);

		if (bled && (bled != -ETIMEDOUT))
			return -EINVAL;
	}
	if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
		rx_writel(dev, MUnit.reserved2, 3);
		msleep(5000); /* Delay 5 seconds */
		var = 0x00000001;
	}
	if (bled && (var != 0x00000001))
		return -EINVAL;
	ssleep(5);
	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
		return -ENODEV;
	if (startup_timeout < 300)
		startup_timeout = 300;
	return 0;
}
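
/**
 *	aac_rx_select_comm	-	Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 */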
int aac_rx_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_PRODUCER:
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
		dev->a_ops.adapter_intr = aac_rx_intr_producer;
		dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
		break;
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
		dev->a_ops.adapter_intr = aac_rx_intr_message;
		dev->a_ops.adapter_deliver = aac_rx_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}
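
/**
 *	_aac_rx_init	-	initialize an Rx based AAC card
 *	@dev: device to configure
 *
 *	Bring the adapter to a running state: map its registers, reset it
 *	if required, wait for the firmware to come up, set up the comm
 *	interface and interrupts, then start it accepting requests.
 */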
int _aac_rx_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	/* Interrupts stay masked until a comm interface is selected */
	dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
	dev->OIMR = status = rx_readb(dev, MUnit.OIMR);
	if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
	    !aac_rx_restart_adapter(dev, 0))
		/* Make sure the Hardware FIFO is empty */
		while ((++restart < 512) &&
		    (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & KERNEL_PANIC) {
		if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running.
	 */
	while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
	{
		if ((restart &&
		    (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		    time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
				dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		    ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		    time_after(jiffies, start + HZ *
		    ((startup_timeout > 60)
		      ? (startup_timeout - 60)
		      : (startup_timeout / 2))))) {
			if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rx_check_health;
	dev->a_ops.adapter_restart = aac_rx_restart_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the ones that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_PRODUCER);
	aac_adapter_disable_int(dev);
	rx_writel(dev, MUnit.ODR, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	aac_adapter_comm(dev, dev->comm_interface);
	dev->sync_mode = 0;	/* sync. mode not supported */
	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", dev) < 0) {
		if (dev->msi)
			pci_disable_msi(dev->pdev);
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = dev->base_start;
	dev->dbg_base_mapped = dev->base;
	dev->dbg_size = dev->base_size;

	aac_adapter_enable_int(dev);
	/*
	 *	Tell the adapter that all is configured, and it can
	 *	start accepting requests
	 */
	aac_rx_start_adapter(dev);

	return 0;

error_iounmap:

	return -1;
}
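
/**
 *	aac_rx_init	-	initialize an Rx based AAC card
 *	@dev: device to configure
 *
 *	Fill in the Rx specific dispatch entries and hand off to
 *	_aac_rx_init().
 */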
int aac_rx_init(struct aac_dev *dev)
{
	/*
	 *	Fill in the function dispatch table.
	 */
	dev->a_ops.adapter_ioremap = aac_rx_ioremap;
	dev->a_ops.adapter_comm = aac_rx_select_comm;

	return _aac_rx_init(dev);
}