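/*
 * lba_pci.c: PCI host bus adapter driver for the HP PA-RISC "Lower
 * Bus Adapter" (LBA) family: Elroy, Mercury, and Quicksilver.
 *
 * Each LBA hangs off an SBA "rope" and fronts one PCI bus.  This file
 * implements PCI config-space accessors, I/O port-space accessors, and
 * bus/MMIO resource discovery (via PDC PAT firmware calls on PAT
 * machines, via SBA/firmware registers on legacy Astro machines).
 */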
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/page.h>

#include <asm/ropes.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>
#include <asm/io.h>

/* Compile-time debug switches; define these to get verbose logging. */
#undef DEBUG_LBA	/* general stuff */
#undef DEBUG_LBA_PORT	/* debug I/O port access */
#undef DEBUG_LBA_CFG	/* debug config space access */
#undef DEBUG_LBA_PAT	/* debug PDC PAT firmware usage */

#undef FBB_SUPPORT	/* fast back-to-back PCI transfers (not enabled) */


#ifdef DEBUG_LBA
#define DBG(x...)	printk(x)
#else
#define DBG(x...)
#endif

#ifdef DEBUG_LBA_PORT
#define DBG_PORT(x...)	printk(x)
#else
#define DBG_PORT(x...)
#endif

#ifdef DEBUG_LBA_CFG
#define DBG_CFG(x...)	printk(x)
#else
#define DBG_CFG(x...)
#endif

#ifdef DEBUG_LBA_PAT
#define DBG_PAT(x...)	printk(x)
#else
#define DBG_PAT(x...)
#endif

#define MODULE_NAME	"LBA"

/* I/O port space on Astro-based systems lives in a fixed, densely
 * packed MMIO window shared by all LBAs under one Astro. */
#define LBA_PORT_BASE	(PCI_F_EXTEND | 0xfee00000UL)
static void __iomem *astro_iop_base __read_mostly;

/* Scratch variable: reading a register into it flushes posted writes. */
static u32 lba_t32;

/* lba flags */
#define LBA_FLAG_SKIP_PROBE	0x10	/* config accesses check device presence first */

#define LBA_SKIP_PROBE(d)	((d)->flags & LBA_FLAG_SKIP_PROBE)

/* Keeps the compiler happy when converting generic HBA pointers. */
#define LBA_DEV(d)	((struct lba_device *)(d))

/* Upper bound on subordinate bus numbers one LBA can support. */
#define LBA_MAX_NUM_BUSES	8

/* The __raw_* accessors skip byte swapping; the READ_REG and WRITE_REG
 * variants do the little-endian conversion PCI registers require. */
#define READ_U8(addr)		__raw_readb(addr)
#define READ_U16(addr)		__raw_readw(addr)
#define READ_U32(addr)		__raw_readl(addr)
#define WRITE_U8(value, addr)	__raw_writeb(value, addr)
#define WRITE_U16(value, addr)	__raw_writew(value, addr)
#define WRITE_U32(value, addr)	__raw_writel(value, addr)

#define READ_REG8(addr)		readb(addr)
#define READ_REG16(addr)	readw(addr)
#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG8(value, addr)		writeb(value, addr)
#define WRITE_REG16(value, addr)	writew(value, addr)
#define WRITE_REG32(value, addr)	writel(value, addr)

/* Pack bus/devfn into the config-address "token" the LBA expects. */
#define LBA_CFG_TOK(bus, dfn)	((u32) ((bus)<<16 | (dfn)<<8))
#define LBA_CFG_BUS(tok)	((u8) ((tok)>>16))
#define LBA_CFG_DEV(tok)	((u8) ((tok)>>11) & 0x1f)
#define LBA_CFG_FUNC(tok)	((u8) ((tok)>>8 ) & 0x7)

/* Extract the rope (LBA) number from a module's hard physical address. */
#define ROPES_PER_IOC	8
#define LBA_NUM(x)	((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1))
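
/*
 * lba_dump_res: debug helper that recursively prints a resource tree
 * (start/end/flags), indenting children by two spaces per level.
 */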
static void
lba_dump_res(struct resource *r, int d)
{
	int i;

	if (NULL == r)
		return;

	printk(KERN_DEBUG "(%p)", r->parent);
	for (i = d; i ; --i) printk(" ");
	printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r,
		(long)r->start, (long)r->end, r->flags);
	lba_dump_res(r->child, d+2);
	lba_dump_res(r->sibling, d);
}
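
/*
 * lba_device_present: sanity-check that a bus number actually lives
 * under this LBA before generating a config cycle for it.  Returns 1
 * if the bus falls within [first_bus, last_sub_bus], 0 otherwise.
 */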
static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d)
{
	u8 first_bus = d->hba.hba_bus->busn_res.start;
	u8 last_sub_bus = d->hba.hba_bus->busn_res.end;

	if ((bus < first_bus) ||
	    (bus > last_sub_bus) ||
	    ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) {
		return 0;
	}

	return 1;
}
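
/*
 * The LBA_CFG_* macros below implement a slow-but-safe config cycle:
 * save the error-config, status-control, and arbitration-mask
 * registers; restrict arbitration and enable "smart mode" so a master
 * abort can't drop the LBA into PCI fatal mode; probe the target;
 * check for a master abort; and finally restore the saved state.
 * They expect the caller to declare error_config, status_control,
 * and arb_mask locals.
 */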
#define LBA_CFG_SETUP(d, tok) {					\
	/* Save contents of error config register. */		\
	error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);	\
								\
	/* Save contents of status control register. */		\
	status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);	\
								\
	/* Save contents of arbitration mask register. */	\
	arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK);	\
								\
	/* Turn off all device arbitration: only the LBA itself	\
	 * may master the PCI bus during the config cycle. */	\
	WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK);	\
								\
	/* Set the smart-mode bit so that master aborts don't	\
	 * put the LBA into PCI fatal mode. */			\
	WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG); \
}

/* Generate a config "probe" cycle against (tok)'s vendor ID register. */
#define LBA_CFG_PROBE(d, tok) {					\
	/* Set up a Vendor ID read cycle on the PCI bus. */	\
	WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
								\
	/* Read the address register back so the write above	\
	 * reaches the LBA before the data access below. */	\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
								\
	/* Generate a config write cycle; writing the read-only	\
	 * vendor ID register has no effect on the device. */	\
	WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA);	\
								\
	/* Flush the posted write before proceeding. */		\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
}
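
/*
 * After a probe cycle, the LBA error status register tells us whether
 * the access master-aborted (no such device) or hit a fatal error.
 * The check below fails the config access on any logged error and
 * clears the error log unless the fatal bit is set.
 */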
#define LBA_MASTER_ABORT_ERROR	0xc
#define LBA_FATAL_ERROR		0x10

#define LBA_CFG_MASTER_ABORT_CHECK(d, base, tok, error) {	\
	u32 error_status = 0;					\
								\
	/* Set the clear-enable (CE) bit; hardware drops it	\
	 * again whenever new errors are logged. */		\
	WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL); \
	error_status = READ_REG32(base + LBA_ERROR_STATUS);	\
	if ((error_status & 0x1f) != 0) {			\
		/* Fail the config read/write. */		\
		error = 1;					\
		if ((error_status & LBA_FATAL_ERROR) == 0) {	\
			/* Clear the error log by setting the	\
			 * clear-log (CL) bit. */		\
			WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL); \
		}						\
	}							\
}

/* Address setup without the flush read; used by the Mercury/Quicksilver
 * config ops below. */
#define LBA_CFG_TR4_ADDR_SETUP(d, addr)				\
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);

#define LBA_CFG_ADDR_SETUP(d, addr) {				\
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
								\
	/* Read the address register back to flush the write	\
	 * before touching the data register. */		\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
}

/* Restore the registers saved by LBA_CFG_SETUP. */
#define LBA_CFG_RESTORE(d, base) {				\
	/* Restore status control register (turn off clear	\
	 * enable). */						\
	WRITE_REG32(status_control, base + LBA_STAT_CTL);	\
								\
	/* Restore error config register (turn off smart	\
	 * mode). */						\
	WRITE_REG32(error_config, base + LBA_ERROR_CONFIG);	\
								\
	/* Restore arbitration mask register (reenable bus	\
	 * arbitration). */					\
	WRITE_REG32(arb_mask, base + LBA_ARB_MASK);		\
}
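
/*
 * lba_rd_cfg: "slow path" config read.  Sets up the safe environment,
 * probes for the device, and only touches the data register if the
 * probe did not master-abort; otherwise returns ~0.
 */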
static unsigned int
lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
{
	u32 data = ~0U;
	int error = 0;
	u32 arb_mask = 0;
	u32 error_config = 0;
	u32 status_control = 0;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_PROBE(d, tok);
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	if (!error) {
		void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

		LBA_CFG_ADDR_SETUP(d, tok | reg);
		switch (size) {
		case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break;
		case 2: data = (u32) READ_REG16(data_reg + (reg & 2)); break;
		case 4: data = READ_REG32(data_reg); break;
		}
	}
	LBA_CFG_RESTORE(d, d->hba.base_addr);
	return data;
}
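
/*
 * elroy_cfg_read: pci_ops read for Elroy.  Note the deliberately
 * unconditional block below: as a workaround for broken Elroys (e.g.
 * B2000/C3600 class machines), every read takes the slow lba_rd_cfg()
 * path, at the cost of missing PCI bus errors.  The skip-probe and
 * fast paths after it are currently unreachable but kept for reference.
 */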
static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	/* FIXME: always use the slow-but-safe method for now. */
	{
		/* Generate a config cycle even on broken Elroys, at the
		 * risk of missing PCI bus errors. */
		*data = lba_rd_cfg(d, tok, pos, size);
		DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __func__, tok, pos, *data);
		return 0;
	}

	/* Unreachable below: skip-probe and direct-access paths. */
	if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->busn_res.start, devfn, d)) {
		DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __func__, tok, pos);
		/* Either don't want to look or know device isn't present. */
		*data = ~0U;
		return 0;
	}

	/* Basic algorithm: generate the config cycle directly. */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1: *data = READ_REG8 (data_reg + (pos & 3)); break;
	case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
	case 4: *data = READ_REG32(data_reg); break;
	}
	DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __func__, tok, pos, *data);
	return 0;
}
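
/*
 * lba_wr_cfg: "slow path" config write, mirroring lba_rd_cfg(): set up
 * the safe environment, perform the write, then check for a master
 * abort and restore the saved register state.
 */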
static void
lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
{
	int error = 0;
	u32 arb_mask = 0;
	u32 error_config = 0;
	u32 status_control = 0;
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_ADDR_SETUP(d, tok | reg);
	switch (size) {
	case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break;
	case 2: WRITE_REG16(data, data_reg + (reg & 2)); break;
	case 4: WRITE_REG32(data, data_reg); break;
	}
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	LBA_CFG_RESTORE(d, d->hba.base_addr);
}
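
/*
 * elroy_cfg_write: unlike the read path, writes honor LBA_FLAG_SKIP_PROBE:
 * without it they take the slow lba_wr_cfg() path; with it they are
 * dropped for absent devices and otherwise issued directly.
 */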
static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	if (!LBA_SKIP_PROBE(d)) {
		/* Original method. */
		lba_wr_cfg(d, tok, pos, (u32) data, size);
		DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __func__, tok, pos, data);
		return 0;
	}

	if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->busn_res.start, devfn, d))) {
		DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __func__, tok, pos, data);
		return 1; /* device not present: drop the write */
	}

	DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __func__, tok, pos, data);

	/* Basic algorithm: generate the config cycle directly. */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
		break;
	case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
		break;
	case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
		break;
	}
	/* Flush the posted write. */
	lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}

static struct pci_ops elroy_cfg_ops = {
	.read =		elroy_cfg_read,
	.write =	elroy_cfg_write,
};
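
/*
 * mercury_cfg_read: Mercury/Quicksilver don't need the Elroy
 * workarounds, so config reads go straight to the hardware.
 */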
static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1:
		*data = READ_REG8(data_reg + (pos & 3));
		break;
	case 2:
		*data = READ_REG16(data_reg + (pos & 2));
		break;
	case 4:
		*data = READ_REG32(data_reg);
		break;
	}

	DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data);
	return 0;
}
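
/*
 * mercury_cfg_write: direct config write; the trailing read of the
 * config address register flushes the posted write.
 */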
static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __func__, tok, pos, data);

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1:
		WRITE_REG8(data, data_reg + (pos & 3));
		break;
	case 2:
		WRITE_REG16(data, data_reg + (pos & 2));
		break;
	case 4:
		WRITE_REG32(data, data_reg);
		break;
	}

	/* Flush the posted write. */
	lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}

static struct pci_ops mercury_cfg_ops = {
	.read =		mercury_cfg_read,
	.write =	mercury_cfg_write,
};


/* Nothing to do at BIOS-init time; just note the call when debugging. */
static void
lba_bios_init(void)
{
	DBG(MODULE_NAME ": lba_bios_init\n");
}


#ifdef CONFIG_64BIT
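
/*
 * truncate_pat_collision: on PAT machines, firmware may already have
 * claimed part of the LMMIO range we want.  Clip "new" so it no longer
 * overlaps anything under "root".  Returns 1 if an existing resource
 * completely covers the new one (caller should drop it), 0 otherwise.
 */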
static unsigned long
truncate_pat_collision(struct resource *root, struct resource *new)
{
	unsigned long start = new->start;
	unsigned long end = new->end;
	struct resource *tmp = root->child;

	if (end <= start || start < root->start || !tmp)
		return 0;

	/* Find the first entry that ends at or after our start. */
	while (tmp && tmp->end < start)
		tmp = tmp->sibling;

	/* No entries overlap. */
	if (!tmp)
		return 0;

	/* Found one that starts at or past our end: no overlap. */
	if (tmp->start >= end)
		return 0;

	if (tmp->start <= start) {
		/* The front of the new range overlaps. */
		new->start = tmp->end + 1;

		if (tmp->end >= end) {
			/* Completely covered: tell caller to drop it. */
			return 1;
		}
	}

	if (tmp->end < end) {
		/* The tail of the new range overlaps. */
		new->end = tmp->start - 1;
	}

	printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] "
					"to [%lx,%lx]\n",
			start, end,
			(long)new->start, (long)new->end);

	return 0;	/* truncation successful */
}

#else
#define truncate_pat_collision(r,n)  (0)
#endif
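
/*
 * lba_fixup_bus: PCI bus walk callback.  Claims bridge or host-bridge
 * resources, claims each device's BARs, and routes each device's
 * interrupt line through this LBA's IOSAPIC.
 */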
static void
lba_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
#ifdef FBB_SUPPORT
	u16 status;
#endif
	struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));

	DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
		bus, (int)bus->busn_res.start, bus->bridge->platform_data);

	/*
	 * Properly set up the bus resources.
	 */
	if (bus->parent) {
		/* PCI-PCI bridge: propagate and claim window resources. */
		int i;

		pci_read_bridge_bases(bus);
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			pci_claim_resource(bus->self, i);
		}
	} else {
		/* Host-to-PCI bridge: register this LBA's own windows. */
		int err;

		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.io_space.name,
			ldev->hba.io_space.start, ldev->hba.io_space.end,
			ldev->hba.io_space.flags);
		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.lmmio_space.name,
			ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
			ldev->hba.lmmio_space.flags);

		err = request_resource(&ioport_resource, &(ldev->hba.io_space));
		if (err < 0) {
			lba_dump_res(&ioport_resource, 2);
			BUG();
		}

		if (ldev->hba.elmmio_space.start) {
			err = request_resource(&iomem_resource,
					&(ldev->hba.elmmio_space));
			if (err < 0) {
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"elmmio_space [%lx/%lx]\n",
					(long)ldev->hba.elmmio_space.start,
					(long)ldev->hba.elmmio_space.end);
				/* Not fatal: continue without the window. */
			}
		}

		if (ldev->hba.lmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
			if (err < 0) {
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"lmmio_space [%lx/%lx]\n",
					(long)ldev->hba.lmmio_space.start,
					(long)ldev->hba.lmmio_space.end);
			}
		}

#ifdef CONFIG_64BIT
		/* GMMIO is distributed range. Every LBA/rope gets one. */
		if (ldev->hba.gmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
			if (err < 0) {
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"gmmio_space [%lx/%lx]\n",
					(long)ldev->hba.gmmio_space.start,
					(long)ldev->hba.gmmio_space.end);
				lba_dump_res(&iomem_resource, 2);
				BUG();
			}
		}
#endif

	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		DBG("lba_fixup_bus() %s\n", pci_name(dev));

		/* Claim each device's BARs. */
		for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
			struct resource *res = &dev->resource[i];

			/* If resource not allocated, skip it. */
			if (!res->start)
				continue;

			pci_claim_resource(dev, i);
		}

#ifdef FBB_SUPPORT
		/*
		 * If one device does not support fast back-to-back,
		 * the whole bus can't use it.
		 */
		(void) pci_read_config_word(dev, PCI_STATUS, &status);
		bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
#endif

		/* PCI-PCI bridges have no IRQ lines to fix up; skip them. */
		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
			continue;

		/* Adjust the interrupt line via this LBA's IOSAPIC. */
		iosapic_fixup_irq(ldev->iosapic_obj, dev);
	}

#ifdef FBB_SUPPORT
	/*
	 * If all devices support fast back-to-back, enable it on the
	 * bus as well.
	 */
	if (fbb_enable) {
		if (bus->parent) {
			u8 control;

			/* Enable on the PCI-PCI bridge. */
			(void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control);
			(void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK);

		} else {
			/* Enabling on the host bridge is chip specific. */
		}
		fbb_enable = PCI_COMMAND_FAST_BACK;
	}

	/* Set FBB and PERR/SERR enables on every device. */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		(void) pci_read_config_word(dev, PCI_COMMAND, &status);
		status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable;
		(void) pci_write_config_word(dev, PCI_COMMAND, status);
	}
#endif
}


static struct pci_bios_ops lba_bios_ops = {
	.init =		lba_bios_init,
	.fixup_bus =	lba_fixup_bus,
};
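
/*
 * I/O port accessors for Astro-based systems: ports live in the
 * shared astro_iop_base window.  The "mask" argument of these
 * generator macros is currently unused.
 */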
#define LBA_PORT_IN(size, mask) \
static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
{ \
	u##size t; \
	t = READ_REG##size(astro_iop_base + addr); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)
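
/*
 * Port writes are posted; on older Elroy revisions (hw_rev < 3) the
 * outX path follows each write with a read of LBA_FUNC_ID, which
 * forces the write out to the chip before returning.
 */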
#define LBA_PORT_OUT(size, mask) \
static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, d, addr, val); \
	WRITE_REG##size(val, astro_iop_base + addr); \
	if (LBA_DEV(d)->hw_rev < 3) \
		lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


static struct pci_port_ops lba_astro_port_ops = {
	.inb =	lba_astro_in8,
	.inw =	lba_astro_in16,
	.inl =	lba_astro_in32,
	.outb =	lba_astro_out8,
	.outw =	lba_astro_out16,
	.outl =	lba_astro_out32
};


#ifdef CONFIG_64BIT
/*
 * On PAT platforms, each LBA has its own 64MB GMMIO window for port
 * space ("PIOP").  Ports are sparsely mapped: each naturally aligned
 * 4-byte port group sits on its own 4KB page of the window, so
 * convert the port address accordingly.
 */
#define PIOP_TO_GMMIO(lba, addr) \
	((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))

#undef LBA_PORT_IN
#define LBA_PORT_IN(size, mask) \
static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
{ \
	u##size t; \
	DBG_PORT("%s(0x%p, 0x%x) ->", __func__, l, addr); \
	t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)


#undef LBA_PORT_OUT
#define LBA_PORT_OUT(size, mask) \
static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
{ \
	void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, l, addr, val); \
	WRITE_REG##size(val, where); \
	/* Flush the posted write. */ \
	lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


static struct pci_port_ops lba_pat_port_ops = {
	.inb =	lba_pat_in8,
	.inw =	lba_pat_in16,
	.inl =	lba_pat_in32,
	.outb =	lba_pat_out8,
	.outw =	lba_pat_out16,
	.outl =	lba_pat_out32
};
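
/*
 * lba_pat_resources: on PAT machines, ask firmware (pdc_pat_cell_module)
 * for this cell module's address ranges in both the PA (physical) and
 * IO (PCI) views, then fill in the bus number, LMMIO/ELMMIO, GMMIO,
 * and I/O port resources from the returned ranges.
 */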
static void
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	unsigned long bytecnt;
	long io_count;
	long status;	/* PDC return status */
	long pa_count;
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;	/* PA view */
	pdc_pat_cell_mod_maddr_block_t *io_pdc_cell;	/* IO view */
	int i;

	pa_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!pa_pdc_cell)
		return;

	io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!io_pdc_cell) {
		kfree(pa_pdc_cell);
		return;
	}

	/* Query PDC for both views of this module's address ranges. */
	status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				PA_VIEW, pa_pdc_cell);
	pa_count = pa_pdc_cell->mod[1];

	status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				IO_VIEW, io_pdc_cell);
	io_count = io_pdc_cell->mod[1];

	/* Sanity check the firmware results. */
	if (status != PDC_OK) {
		panic("pdc_pat_cell_module() call failed for LBA!\n");
	}

	if (PAT_GET_ENTITY(pa_pdc_cell->mod_info) != PAT_ENTITY_LBA) {
		panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
	}

	/*
	 * Inspect each range returned in the PA view; the IO view entry
	 * at the same index describes the same range in PCI space.
	 */
	for (i = 0; i < pa_count; i++) {
		struct {
			unsigned long type;
			unsigned long start;
			unsigned long end;	/* aka finish */
		} *p, *io;
		struct resource *r;

		p = (void *) &(pa_pdc_cell->mod[2+i*3]);
		io = (void *) &(io_pdc_cell->mod[2+i*3]);

		/* Convert the PAT range type into a resource. */
		switch (p->type & 0xff) {
		case PAT_PBNUM:
			lba_dev->hba.bus_num.start = p->start;
			lba_dev->hba.bus_num.end   = p->end;
			lba_dev->hba.bus_num.flags = IORESOURCE_BUS;
			break;

		case PAT_LMMIO:
			/* Used to fix up pre-initialized MEM BARs. */
			if (!lba_dev->hba.lmmio_space.start) {
				sprintf(lba_dev->hba.lmmio_name,
						"PCI%02x LMMIO",
						(int)lba_dev->hba.bus_num.start);
				lba_dev->hba.lmmio_space_offset = p->start -
					io->start;
				r = &lba_dev->hba.lmmio_space;
				r->name = lba_dev->hba.lmmio_name;
			} else if (!lba_dev->hba.elmmio_space.start) {
				sprintf(lba_dev->hba.elmmio_name,
						"PCI%02x ELMMIO",
						(int)lba_dev->hba.bus_num.start);
				r = &lba_dev->hba.elmmio_space;
				r->name = lba_dev->hba.elmmio_name;
			} else {
				printk(KERN_WARNING MODULE_NAME
					" only supports 2 LMMIO resources!\n");
				break;
			}

			r->start  = p->start;
			r->end    = p->end;
			r->flags  = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_GMMIO:
			/* MMIO space above 4GB phys addr; for 64-bit BARs. */
			sprintf(lba_dev->hba.gmmio_name, "PCI%02x GMMIO",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.gmmio_space;
			r->name  = lba_dev->hba.gmmio_name;
			r->start  = p->start;
			r->end    = p->end;
			r->flags  = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_NPIOP:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : ignoring NPIOP (0x%lx)\n",
				i, p->start);
			break;

		case PAT_PIOP:
			/*
			 * Map the 64MB window through which this LBA's
			 * sparse I/O port space is accessed.
			 */
			lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);

			sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.io_space;
			r->name  = lba_dev->hba.io_name;
			r->start  = HBA_PORT_BASE(lba_dev->hba.hba_num);
			r->end    = r->start + HBA_PORT_SPACE_SIZE - 1;
			r->flags  = IORESOURCE_IO;
			r->parent = r->sibling = r->child = NULL;
			break;

		default:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : unknown pat range type (0x%lx)\n",
				i, p->type & 0xff);
			break;
		}
	}

	kfree(pa_pdc_cell);
	kfree(io_pdc_cell);
}
#else
/* Without PAT firmware (Astro systems), reuse the Astro port ops and
 * discover resources via lba_legacy_resources() instead. */
#define lba_pat_port_ops lba_astro_port_ops
#define lba_pat_resources(pa_dev, lba_dev)
#endif	/* CONFIG_64BIT */


/* Implemented in sba_iommu.c: hand out the SBA's distributed and
 * directed LMMIO ranges to its ropes. */
extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
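
/*
 * lba_legacy_resources: non-PAT (Astro) resource discovery.  The bus
 * number range comes from the LBA_FW_SCRATCH register, the distributed
 * and directed LMMIO ranges come from the SBA, and port space is a
 * fixed HBA_PORT_SPACE_SIZE window per LBA.
 */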
static void
lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	struct resource *r;
	int lba_num;

	lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;

	/*
	 * LBA_FW_SCRATCH holds the bus number range: first bus in the
	 * low byte, last subordinate bus in the next byte.
	 */
	lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
	r = &(lba_dev->hba.bus_num);
	r->name = "LBA PCI Busses";
	r->start = lba_num & 0xff;
	r->end = (lba_num >> 8) & 0xff;
	r->flags = IORESOURCE_BUS;

	/* Set up local MMIO space; used to fix up pre-assigned MEM BARs. */
	r = &(lba_dev->hba.lmmio_space);
	sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.lmmio_name;

#if 1
	/* Take this rope's share of the SBA's distributed LMMIO range. */
	sba_distributed_lmmio(pa_dev, r);
#else
	/*
	 * Unused alternative kept for reference: read the LMMIO range
	 * directly from the LBA's own registers.  Note this refers to
	 * an mmio_mask that is not defined in this build.
	 */
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE);
	if (r->start & 1) {
		unsigned long rsize;

		r->flags = IORESOURCE_MEM;
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
		rsize = ~READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);

		/*
		 * Each rope gets an equal share of the distributed
		 * range; offset this LBA's share by its rope number.
		 */
		rsize /= ROPES_PER_IOC;
		r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa.start);
		r->end = r->start + rsize;
	} else {
		r->end = r->start = 0;	/* not enabled */
	}
#endif

	/*
	 * "Directed" (extra) LMMIO range: a range routed to just this
	 * rope, usable when the distributed range is exhausted.
	 */
	r = &(lba_dev->hba.elmmio_space);
	sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.elmmio_name;

#if 1
	sba_directed_lmmio(pa_dev, r);
#else
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);

	if (r->start & 1) {
		unsigned long rsize;
		r->flags = IORESOURCE_MEM;
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
		rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
		r->end = r->start + ~rsize;
	}
#endif

	/* I/O port space: a fixed-size window per LBA. */
	r = &(lba_dev->hba.io_space);
	sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.io_name;
	r->flags = IORESOURCE_IO;
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
	r->end = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));

	/* Virtualize the I/O port space ranges. */
	lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
	r->start |= lba_num;
	r->end |= lba_num;
}
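
/*
 * lba_hw_init: sanity-check and initialize LBA hardware state before
 * the bus is scanned: note whether a PCI bus reset is still asserted,
 * take the chip out of "smart mode", set the hard-fail (HF) enable
 * bit, and turn bus arbitration back on if firmware left it disabled.
 */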
static int __init
lba_hw_init(struct lba_device *d)
{
	u32 stat;
	u32 bus_reset;	/* PCI bus reset still asserted? */

#if 0
	/* Dump the chip's error/arbitration state for bringup debug. */
	printk(KERN_DEBUG "LBA %lx  STAT_CTL %Lx  ERROR_CFG %Lx  STATUS %Lx  DMA_CTL %Lx\n",
		d->hba.base_addr,
		READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
		READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
		READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
		READ_REG64(d->hba.base_addr + LBA_DMA_CTL));
	printk(KERN_DEBUG "	ARB mask %Lx  pri %Lx  mode %Lx  mtlt %Lx\n",
		READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
		READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
		READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
		READ_REG64(d->hba.base_addr + LBA_ARB_MTLT));
	printk(KERN_DEBUG "	HINT cfg 0x%Lx\n",
		READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
	printk(KERN_DEBUG "	HINT reg ");
	{ int i;
	for (i = LBA_HINT_BASE; i < (14*8 + LBA_HINT_BASE); i += 8)
		printk(" %Lx", READ_REG64(d->hba.base_addr + i));
	}
	printk("\n");
#endif

#ifdef CONFIG_64BIT
	/* No 64-bit-specific initialization is currently needed here. */
#endif

	/* PCI bus reset still asserted? Note it; we delay below. */
	bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
	if (bus_reset) {
		printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n");
	}

	stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
	if (stat & LBA_SMART_MODE) {
		printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n");
		stat &= ~LBA_SMART_MODE;
		WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
	}

	/* Set hard-fail (HF) mode. */
	stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
	WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);

	/*
	 * If the bus was just taken out of reset, give it time to
	 * settle before the first config cycle is generated.
	 */
	if (bus_reset)
		mdelay(pci_post_reset_delay);

	if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
		/*
		 * Some firmware leaves the arbitration mask cleared;
		 * reenable arbitration or nothing can master the bus.
		 */
		printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n");
		WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
	}

	/*
	 * Error config and status/control are otherwise left at their
	 * boot-firmware defaults.
	 */
	return 0;
}

/*
 * Bus numbers must increase monotonically across all LBAs; remember
 * where the previous LBA's child busses ended.
 */
static unsigned int lba_next_bus = 0;
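
/*
 * lba_driver_probe: called once per discovered LBA.  Identifies the
 * chip (Elroy vs. Mercury/Quicksilver) and picks matching config ops,
 * registers the IOSAPIC, initializes the hardware, discovers resources
 * (PAT or legacy), then creates and scans the PCI root bus.
 */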
static int __init
lba_driver_probe(struct parisc_device *dev)
{
	struct lba_device *lba_dev;
	LIST_HEAD(resources);
	struct pci_bus *lba_bus;
	struct pci_ops *cfg_ops;
	u32 func_class;
	void *tmp_obj;
	char *version;
	void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);
	int max;

	/* Identify the chip via its function class register. */
	func_class = READ_REG32(addr + LBA_FCLASS);

	if (IS_ELROY(dev)) {
		func_class &= 0xf;
		switch (func_class) {
		case 0:	version = "TR1.0"; break;
		case 1:	version = "TR2.0"; break;
		case 2:	version = "TR2.1"; break;
		case 3:	version = "TR2.2"; break;
		case 4:	version = "TR3.0"; break;
		case 5:	version = "TR4.0"; break;
		default: version = "TR4+";
		}

		printk(KERN_INFO "Elroy version %s (0x%x) found at 0x%lx\n",
			version, func_class & 0xf, (long)dev->hpa.start);

		if (func_class < 2) {
			printk(KERN_WARNING "Can't support LBA older than "
				"TR2.1 - continuing under adversity.\n");
		}

#if 0
		/* Elroy TR4.0 could use the faster Mercury config ops,
		 * but this has not been enabled. */
		if (func_class > 4) {
			cfg_ops = &mercury_cfg_ops;
		} else
#endif
		{
			cfg_ops = &elroy_cfg_ops;
		}

	} else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
		int major, minor;

		func_class &= 0xff;
		major = func_class >> 4;
		minor = func_class & 0xf;

		/* Mercury/Quicksilver use the simpler TR4-style config ops. */
		printk(KERN_INFO "%s version TR%d.%d (0x%x) found at 0x%lx\n",
			IS_MERCURY(dev) ? "Mercury" : "Quicksilver", major,
			minor, func_class, (long)dev->hpa.start);

		cfg_ops = &mercury_cfg_ops;
	} else {
		printk(KERN_ERR "Unknown LBA found at 0x%lx\n",
			(long)dev->hpa.start);
		return -ENODEV;
	}

	/* Register this LBA's IOSAPIC with the iosapic driver. */
	tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE);

	/*
	 * The PCI bus exists even if no PCI devices are present, so
	 * allocate and register it unconditionally.
	 */
	lba_dev = kzalloc(sizeof(struct lba_device), GFP_KERNEL);
	if (!lba_dev) {
		printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
		return 1;
	}

	/* ---------- First : initialize data we already have ---------- */

	lba_dev->hw_rev = func_class;
	lba_dev->hba.base_addr = addr;
	lba_dev->hba.dev = dev;
	lba_dev->iosapic_obj = tmp_obj;	/* save the IRQ handle */
	lba_dev->hba.iommu = sba_get_iommu(dev);	/* get iommu data */
	parisc_set_drvdata(dev, lba_dev);

	/* ------------ Second : initialize common stuff ---------- */
	pci_bios = &lba_bios_ops;
	pcibios_register_hba(HBA_DATA(lba_dev));
	spin_lock_init(&lba_dev->lba_lock);

	if (lba_hw_init(lba_dev))
		return 1;

	/* ---------- Third : setup I/O Port and MMIO resources --------- */

	if (is_pdc_pat()) {
		/* PAT firmware: sparse PIOP port space per LBA. */
		pci_port = &lba_pat_port_ops;
		/* Ask PDC PAT what resources this LBA has. */
		lba_pat_resources(dev, lba_dev);
	} else {
		if (!astro_iop_base) {
			/* Map the shared Astro port window once. */
			astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
			pci_port = &lba_astro_port_ops;
		}

		/* Read the resource ranges from the chip/SBA. */
		lba_legacy_resources(dev, lba_dev);
	}

	if (lba_dev->hba.bus_num.start < lba_next_bus)
		lba_dev->hba.bus_num.start = lba_next_bus;

	/*
	 * If the firmware-claimed LMMIO range completely collides with
	 * an existing resource, drop it.
	 */
	if (truncate_pat_collision(&iomem_resource,
				   &(lba_dev->hba.lmmio_space))) {
		printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
				(long)lba_dev->hba.lmmio_space.start,
				(long)lba_dev->hba.lmmio_space.end);
		lba_dev->hba.lmmio_space.flags = 0;
	}

	pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
				HBA_PORT_BASE(lba_dev->hba.hba_num));
	if (lba_dev->hba.elmmio_space.start)
		pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
					lba_dev->hba.lmmio_space_offset);
	if (lba_dev->hba.lmmio_space.flags)
		pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
					lba_dev->hba.lmmio_space_offset);
	if (lba_dev->hba.gmmio_space.flags)
		pci_add_resource(&resources, &lba_dev->hba.gmmio_space);

	pci_add_resource(&resources, &lba_dev->hba.bus_num);

	dev->dev.platform_data = lba_dev;
	lba_bus = lba_dev->hba.hba_bus =
		pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
				    cfg_ops, NULL, &resources);
	if (!lba_bus) {
		pci_free_resource_list(&resources);
		return 0;
	}

	max = pci_scan_child_bus(lba_bus);

	/* PAT firmware leaves some devices unconfigured; size and
	 * assign their resources now. */
	if (is_pdc_pat()) {
		DBG_PAT("LBA pci_bus_size_bridges()\n");
		pci_bus_size_bridges(lba_bus);

		DBG_PAT("LBA pci_bus_assign_resources()\n");
		pci_bus_assign_resources(lba_bus);

#ifdef DEBUG_LBA_PAT
		DBG_PAT("\nLBA PIOP resource tree\n");
		lba_dump_res(&lba_dev->hba.io_space, 2);
		DBG_PAT("\nLBA LMMIO resource tree\n");
		lba_dump_res(&lba_dev->hba.lmmio_space, 2);
#endif
	}
	pci_enable_bridges(lba_bus);

	/*
	 * After the initial bus walk, restrict Elroy config cycles to
	 * devices known to be present: master aborts on config cycles
	 * are dangerous on early Elroy revisions.
	 */
	if (cfg_ops == &elroy_cfg_ops) {
		lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
	}

	lba_next_bus = max + 1;
	pci_bus_add_devices(lba_bus);

	/* Whew! Finally done! */
	return 0;
}

static struct parisc_device_id lba_tbl[] = {
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },
	{ 0, }
};

static struct parisc_driver lba_driver = {
	.name =		MODULE_NAME,
	.id_table =	lba_tbl,
	.probe =	lba_driver_probe,
};

/*
 * One-time initialization: register the driver with the PA-RISC
 * device walk so each discovered LBA gets probed.
 */
void __init lba_init(void)
{
	register_parisc_driver(&lba_driver);
}
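
/*
 * lba_set_iregs: initialize the IBASE/IMASK registers for this LBA,
 * telling it which DMA address range its IOMMU translates; called
 * from the SBA/IOMMU initialization code.
 */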
void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
{
	void __iomem *base_addr = ioremap_nocache(lba->hpa.start, 4096);

	imask <<= 2;	/* adjust for hints - 2 registers per entry */

	/* Sanity check: both values must be 2MB aligned. */
	WARN_ON((ibase & 0x001fffff) != 0);
	WARN_ON((imask & 0x001fffff) != 0);

	DBG("%s() ibase 0x%x imask 0x%x\n", __func__, ibase, imask);
	WRITE_REG32(imask, base_addr + LBA_IMASK);
	WRITE_REG32(ibase, base_addr + LBA_IBASE);
	iounmap(base_addr);
}