1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/delay.h>
34#include <linux/types.h>
35#include <linux/kernel.h>
36#include <linux/spinlock.h>
37#include <linux/init.h>
38#include <linux/pci.h>
39#include <linux/ioport.h>
40#include <linux/slab.h>
41
42#include <asm/byteorder.h>
43#include <asm/pdc.h>
44#include <asm/pdcpat.h>
45#include <asm/page.h>
46
47#include <asm/ropes.h>
48#include <asm/hardware.h>
49#include <asm/parisc-device.h>
50#include <asm/io.h>
51
52#undef DEBUG_LBA
53#undef DEBUG_LBA_PORT
54#undef DEBUG_LBA_CFG
55#undef DEBUG_LBA_PAT
56
57#undef FBB_SUPPORT
58
59
60#ifdef DEBUG_LBA
61#define DBG(x...) printk(x)
62#else
63#define DBG(x...)
64#endif
65
66#ifdef DEBUG_LBA_PORT
67#define DBG_PORT(x...) printk(x)
68#else
69#define DBG_PORT(x...)
70#endif
71
72#ifdef DEBUG_LBA_CFG
73#define DBG_CFG(x...) printk(x)
74#else
75#define DBG_CFG(x...)
76#endif
77
78#ifdef DEBUG_LBA_PAT
79#define DBG_PAT(x...) printk(x)
80#else
81#define DBG_PAT(x...)
82#endif
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
#define MODULE_NAME "LBA"

/* Non-PAT (legacy Astro) systems decode legacy I/O-port space here. */
#define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL)
static void __iomem *astro_iop_base __read_mostly;

/* Scratch target for reads whose only purpose is to flush posted MMIO writes. */
static u32 lba_t32;

/* lba_device.flags bit: this LBA revision must not generate raw config
 * probe cycles; use lba_device_present() bus-range checks instead. */
#define LBA_FLAG_SKIP_PROBE 0x10

#define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE)



/* struct pci_hba_data is the first member of struct lba_device, so a
 * plain cast converts between the two. */
#define LBA_DEV(d) ((struct lba_device *) (d))




/* Maximum number of secondary buses behind one LBA. */
#define LBA_MAX_NUM_BUSES 8






/* Raw (non-byte-swapping) register accessors. */
#define READ_U8(addr) __raw_readb(addr)
#define READ_U16(addr) __raw_readw(addr)
#define READ_U32(addr) __raw_readl(addr)
#define WRITE_U8(value, addr) __raw_writeb(value, addr)
#define WRITE_U16(value, addr) __raw_writew(value, addr)
#define WRITE_U32(value, addr) __raw_writel(value, addr)

/* Little-endian (PCI byte order) register accessors. */
#define READ_REG8(addr) readb(addr)
#define READ_REG16(addr) readw(addr)
#define READ_REG32(addr) readl(addr)
#define READ_REG64(addr) readq(addr)
#define WRITE_REG8(value, addr) writeb(value, addr)
#define WRITE_REG16(value, addr) writew(value, addr)
#define WRITE_REG32(value, addr) writel(value, addr)


/* Pack/unpack a PCI config "token": bus in bits 23:16, devfn in 15:8. */
#define LBA_CFG_TOK(bus,dfn) ((u32) ((bus)<<16 | (dfn)<<8))
#define LBA_CFG_BUS(tok) ((u8) ((tok)>>16))
#define LBA_CFG_DEV(tok) ((u8) ((tok)>>11) & 0x1f)
#define LBA_CFG_FUNC(tok) ((u8) ((tok)>>8 ) & 0x7)




/* Extract which rope (0..7) an LBA hangs off, from its HPA.
 * NOTE(review): assumes the rope number lives in HPA bits 15:13 — confirm
 * against Astro/Ike rope address layout. */
#define ROPES_PER_IOC 8
#define LBA_NUM(x) ((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1))
157
158
159static void
160lba_dump_res(struct resource *r, int d)
161{
162 int i;
163
164 if (NULL == r)
165 return;
166
167 printk(KERN_DEBUG "(%p)", r->parent);
168 for (i = d; i ; --i) printk(" ");
169 printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r,
170 (long)r->start, (long)r->end, r->flags);
171 lba_dump_res(r->child, d+2);
172 lba_dump_res(r->sibling, d);
173}
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d)
191{
192 u8 first_bus = d->hba.hba_bus->busn_res.start;
193 u8 last_sub_bus = d->hba.hba_bus->busn_res.end;
194
195 if ((bus < first_bus) ||
196 (bus > last_sub_bus) ||
197 ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) {
198 return 0;
199 }
200
201 return 1;
202}
203
204
205
/*
 * Quiesce the LBA before touching PCI config space.  Saves the current
 * ERROR_CONFIG, STAT_CTL and ARB_MASK register values into u32 locals
 * (error_config, status_control, arb_mask) that MUST exist in the
 * caller's scope, disables PCI arbitration, and enables "smart mode" so
 * a master abort on a probe does not hard-fail the machine.  Paired
 * with LBA_CFG_RESTORE.
 *
 * FIX: the stripped multi-line comments took their trailing backslashes
 * with them, leaving blank lines that terminated the macro early; the
 * line continuations are restored here.
 */
#define LBA_CFG_SETUP(d, tok) {					\
	/* Save contents of error config register. */		\
	error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);	\
	/* Save contents of status control register. */		\
	status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);	\
	/* Save contents of arb mask register. */		\
	arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK);	\
	/* Disable all PCI arbitration while config cycles run. */	\
	WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK);	\
	/* Smart mode: master aborts on probes are survivable. */	\
	WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG);	\
}
231
232
/*
 * Probe for device presence by generating a config cycle that targets
 * the device's VENDOR_ID register.  If nothing responds the LBA logs a
 * master abort, which the caller detects with LBA_CFG_MASTER_ABORT_CHECK.
 * The READ_REG32 of the address register after each write flushes the
 * posted write before the next step.
 *
 * FIX: restored the backslash line continuations lost when the original
 * block comments were stripped (the bare blank lines ended the macro).
 */
#define LBA_CFG_PROBE(d, tok) {					\
	/* Select bus/dev/fn + VENDOR_ID in the config address reg. */	\
	WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
	/* Read it back to flush the posted write. */		\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
	/* Generate the probe cycle (VENDOR_ID is read-only, harmless). */	\
	WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA);	\
	/* Flush again so the cycle completes before status is checked. */	\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
}
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281#define LBA_MASTER_ABORT_ERROR 0xc
282#define LBA_FATAL_ERROR 0x10
283
/*
 * Check whether the preceding config cycle caused a PCI error (e.g. a
 * master abort from probing an empty slot).  Sets @error to 1 if so,
 * and clears the LBA error log unless a fatal error is latched.
 * Requires status_control from LBA_CFG_SETUP in the caller's scope.
 *
 * FIX: restored backslash line continuations lost with the stripped
 * comments (blank lines had terminated the macro definition early).
 */
#define LBA_CFG_MASTER_ABORT_CHECK(d, base, tok, error) {	\
	u32 error_status = 0;					\
	/* Arm error-log capture, then read the latched status. */	\
	WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL);	\
	error_status = READ_REG32(base + LBA_ERROR_STATUS);	\
	if ((error_status & 0x1f) != 0) {			\
		/* A PCI error occurred during the config cycle. */	\
		error = 1;					\
		if ((error_status & LBA_FATAL_ERROR) == 0) {	\
			/* Non-fatal: clear the error log and continue. */	\
			WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL);	\
		}						\
	}							\
}
306
/* TR4.0+ (Mercury/Quicksilver) address setup: just write the dword-aligned
 * config address; no flush read is needed on these revisions. */
#define LBA_CFG_TR4_ADDR_SETUP(d, addr) \
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);
309
/*
 * Pre-TR4 address setup: write the dword-aligned config address, then
 * read it back to flush the posted write before data is accessed.
 *
 * FIX: restored the backslash line continuations lost when the original
 * comment was stripped (the blank lines ended the macro early).
 */
#define LBA_CFG_ADDR_SETUP(d, addr) {				\
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
	/* Read back to flush the posted address write. */	\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
}
318
319
/*
 * Undo LBA_CFG_SETUP: restore the saved STAT_CTL, ERROR_CONFIG (drops
 * smart mode) and ARB_MASK (re-enables arbitration) register values
 * from the caller's status_control/error_config/arb_mask locals.
 *
 * FIX: restored the backslash line continuations lost when the original
 * comments were stripped (blank lines had terminated the macro early).
 */
#define LBA_CFG_RESTORE(d, base) {				\
	/* Restore status control register (turn off clear-enable). */	\
	WRITE_REG32(status_control, base + LBA_STAT_CTL);	\
	/* Restore error config register (turn off smart mode). */	\
	WRITE_REG32(error_config, base + LBA_ERROR_CONFIG);	\
	/* Restore arbitration mask (re-enable PCI arbitration). */	\
	WRITE_REG32(arb_mask, base + LBA_ARB_MASK);		\
}
334
335
336
/* "Safe" config-space read for LBA revisions that can wedge on master
 * aborts: quiesce the chip, probe the target first, and only read the
 * real register if the probe did not error.  Returns ~0U (all ones,
 * like a true PCI master abort) on error.  Register access order here
 * is load-bearing; do not reorder. */
static unsigned int
lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
{
	u32 data = ~0U;
	int error = 0;
	u32 arb_mask = 0;	/* written by LBA_CFG_SETUP, read by RESTORE */
	u32 error_config = 0;	/* written by LBA_CFG_SETUP, read by RESTORE */
	u32 status_control = 0;	/* written by LBA_CFG_SETUP, read by CHECK/RESTORE */

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_PROBE(d, tok);
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	if (!error) {
		void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

		/* Sub-dword accesses are offset within the data register. */
		LBA_CFG_ADDR_SETUP(d, tok | reg);
		switch (size) {
		case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break;
		case 2: data = (u32) READ_REG16(data_reg+ (reg & 2)); break;
		case 4: data = READ_REG32(data_reg); break;
		}
	}
	LBA_CFG_RESTORE(d, d->hba.base_addr);
	return(data);
}
362
363
364static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
365{
366 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
367 u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
368 u32 tok = LBA_CFG_TOK(local_bus, devfn);
369 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
370
371 if ((pos > 255) || (devfn > 255))
372 return -EINVAL;
373
374
375 {
376
377
378 *data = lba_rd_cfg(d, tok, pos, size);
379 DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __func__, tok, pos, *data);
380 return 0;
381 }
382
383 if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->busn_res.start, devfn, d)) {
384 DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __func__, tok, pos);
385
386 *data = ~0U;
387 return(0);
388 }
389
390
391
392
393
394 LBA_CFG_ADDR_SETUP(d, tok | pos);
395 switch(size) {
396 case 1: *data = READ_REG8 (data_reg + (pos & 3)); break;
397 case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
398 case 4: *data = READ_REG32(data_reg); break;
399 }
400 DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __func__, tok, pos, *data);
401 return 0;
402}
403
404
/* "Safe" config-space write counterpart to lba_rd_cfg(): quiesce the
 * LBA, perform the write, then check for (and clear) a master abort
 * before restoring chip state.  Errors are detected but, like the
 * original, not reported to the caller. */
static void
lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
{
	int error = 0;
	u32 arb_mask = 0;	/* written by LBA_CFG_SETUP, read by RESTORE */
	u32 error_config = 0;	/* written by LBA_CFG_SETUP, read by RESTORE */
	u32 status_control = 0;	/* written by LBA_CFG_SETUP, read by CHECK/RESTORE */
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_ADDR_SETUP(d, tok | reg);
	switch (size) {
	case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break;
	case 2: WRITE_REG16(data, data_reg + (reg & 2)); break;
	case 4: WRITE_REG32(data, data_reg); break;
	}
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	LBA_CFG_RESTORE(d, d->hba.base_addr);
}
424
425
426
427
428
429
430
/* pci_ops.write for Elroy LBAs.  Uses the "safe" lba_wr_cfg() sequence
 * unless this revision is flagged to skip probing, in which case the
 * bus-range presence check guards a raw register-sequence write. */
static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus,devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	if (!LBA_SKIP_PROBE(d)) {
		/* "Safe" path: quiesce/probe/restore around the write. */
		lba_wr_cfg(d, tok, pos, (u32) data, size);
		DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __func__, tok, pos,data);
		return 0;
	}

	if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->busn_res.start, devfn, d))) {
		DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __func__, tok, pos,data);
		return 1; /* device absent: drop the write */
	}

	DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __func__, tok, pos, data);

	/* Basic algorithm: set address, write data register directly. */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
		   break;
	case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
		   break;
	case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
		   break;
	}
	/* Flush the posted write. */
	lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}
468
469
/* Config accessors for Elroy-based LBAs. */
static struct pci_ops elroy_cfg_ops = {
	.read =		elroy_cfg_read,
	.write =	elroy_cfg_write,
};
474
475
476
477
478
479
480
481static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
482{
483 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
484 u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
485 u32 tok = LBA_CFG_TOK(local_bus, devfn);
486 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
487
488 if ((pos > 255) || (devfn > 255))
489 return -EINVAL;
490
491 LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
492 switch(size) {
493 case 1:
494 *data = READ_REG8(data_reg + (pos & 3));
495 break;
496 case 2:
497 *data = READ_REG16(data_reg + (pos & 2));
498 break;
499 case 4:
500 *data = READ_REG32(data_reg); break;
501 break;
502 }
503
504 DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data);
505 return 0;
506}
507
508
509
510
511
512
/* pci_ops.write for Mercury/Quicksilver (TR4.0+) LBAs: direct address +
 * data register sequence, then a flush read of the address register. */
static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus,devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __func__, tok, pos, data);

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1:
		WRITE_REG8 (data, data_reg + (pos & 3));
		break;
	case 2:
		WRITE_REG16(data, data_reg + (pos & 2));
		break;
	case 4:
		WRITE_REG32(data, data_reg);
		break;
	}

	/* Flush the posted write (non-byte-swapping read is sufficient). */
	lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}
542
/* Config accessors for Mercury/Quicksilver-based LBAs. */
static struct pci_ops mercury_cfg_ops = {
	.read =		mercury_cfg_read,
	.write =	mercury_cfg_write,
};
547
548
/* pci_bios_ops.init hook — nothing to do for LBA beyond a debug trace. */
static void
lba_bios_init(void)
{
	DBG(MODULE_NAME ": lba_bios_init\n");
}
554
555
556#ifdef CONFIG_64BIT
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571static unsigned long
572truncate_pat_collision(struct resource *root, struct resource *new)
573{
574 unsigned long start = new->start;
575 unsigned long end = new->end;
576 struct resource *tmp = root->child;
577
578 if (end <= start || start < root->start || !tmp)
579 return 0;
580
581
582 while (tmp && tmp->end < start)
583 tmp = tmp->sibling;
584
585
586 if (!tmp) return 0;
587
588
589
590
591 if (tmp->start >= end) return 0;
592
593 if (tmp->start <= start) {
594
595 new->start = tmp->end + 1;
596
597 if (tmp->end >= end) {
598
599 return 1;
600 }
601 }
602
603 if (tmp->end < end ) {
604
605 new->end = tmp->start - 1;
606 }
607
608 printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] "
609 "to [%lx,%lx]\n",
610 start, end,
611 (long)new->start, (long)new->end );
612
613 return 0;
614}
615
616
617
618
619
620
621
/* On Mako and newer CPUs, PAT firmware may report an LMMIO range that is
 * shorter than what the LBA's LMMIO_MASK register decodes.  Grow @end by
 * the register-derived length (capped at 256MB), then clip against every
 * other region already in iomem_resource so the extension cannot overlap
 * an existing claim.  Returns the (possibly unchanged) new end. */
static unsigned long
extend_lmmio_len(unsigned long start, unsigned long end, unsigned long lba_len)
{
	struct resource *tmp;

	/* Older CPUs: trust firmware, keep the reported end. */
	if (boot_cpu_data.cpu_type < mako)
		return end;

	pr_debug("LMMIO mismatch: PAT length = 0x%lx, MASK register = 0x%lx\n",
		end - start, lba_len);

	/* Cap the hardware-decoded length at 256MB. */
	lba_len = min(lba_len+1, 256UL*1024*1024);

	pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - original\n", start, end);

	/* Extend, guarding against wraparound. */
	end += lba_len;
	if (end < start)
		end = -1ULL;

	pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - current\n", start, end);

	/* Clip against every other claimed iomem region that overlaps. */
	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
		pr_debug("LBA: testing %pR\n", tmp);
		if (tmp->start == start)
			continue;	/* this LBA's own entry */
		if (tmp->end < start)
			continue;	/* entirely below us */
		if (tmp->start > end)
			continue;	/* entirely above us */
		if (end >= tmp->start)
			end = tmp->start - 1;
	}

	pr_info("LBA: lmmio_space [0x%lx-0x%lx] - new\n", start, end);

	return end;
}
663
664#else
665#define truncate_pat_collision(r,n) (0)
666#endif
667
668
669
670
671
672
673
674
675
676
/* pci_bios_ops.fixup_bus hook: called once per bus during the scan.
 * For the root bus, registers this LBA's I/O-port, ELMMIO, LMMIO and
 * (64-bit) GMMIO windows with the global resource trees; for child
 * buses, reads and claims the bridge windows.  Then claims each
 * device's BARs and programs its IRQ via the IOSAPIC. */
static void
lba_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
#ifdef FBB_SUPPORT
	u16 status;
#endif
	struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));

	DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
		bus, (int)bus->busn_res.start, bus->bridge->platform_data);

	if (bus->parent) {
		int i;
		/* Child bus: pick up windows from the PCI-PCI bridge. */
		pci_read_bridge_bases(bus);
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
			pci_claim_bridge_resource(bus->self, i);
	} else {
		/* Root bus: publish this HBA's windows. */
		int err;

		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.io_space.name,
			ldev->hba.io_space.start, ldev->hba.io_space.end,
			ldev->hba.io_space.flags);
		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.lmmio_space.name,
			ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
			ldev->hba.lmmio_space.flags);

		/* No usable port space is unrecoverable. */
		err = request_resource(&ioport_resource, &(ldev->hba.io_space));
		if (err < 0) {
			lba_dump_res(&ioport_resource, 2);
			BUG();
		}

		if (ldev->hba.elmmio_space.flags) {
			err = request_resource(&iomem_resource,
					&(ldev->hba.elmmio_space));
			if (err < 0) {
				/* Non-fatal: continue without ELMMIO. */
				printk("FAILED: lba_fixup_bus() request for "
						"elmmio_space [%lx/%lx]\n",
						(long)ldev->hba.elmmio_space.start,
						(long)ldev->hba.elmmio_space.end);
			}
		}

		if (ldev->hba.lmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
			if (err < 0) {
				/* Non-fatal: continue without LMMIO. */
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"lmmio_space [%lx/%lx]\n",
					(long)ldev->hba.lmmio_space.start,
					(long)ldev->hba.lmmio_space.end);
			}
		}

#ifdef CONFIG_64BIT
		/* GMMIO is only set up on PAT platforms; losing it is fatal. */
		if (ldev->hba.gmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
			if (err < 0) {
				printk("FAILED: lba_fixup_bus() request for "
					"gmmio_space [%lx/%lx]\n",
					(long)ldev->hba.gmmio_space.start,
					(long)ldev->hba.gmmio_space.end);
				lba_dump_res(&iomem_resource, 2);
				BUG();
			}
		}
#endif

	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		DBG("lba_fixup_bus() %s\n", pci_name(dev));

		/* Claim every BAR firmware already assigned. */
		for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
			struct resource *res = &dev->resource[i];

			/* Unassigned BAR: skip it. */
			if (!res->start)
				continue;

			pci_claim_resource(dev, i);
		}

#ifdef FBB_SUPPORT
		/* Fast back-to-back only works if every device supports it. */
		(void) pci_read_config_word(dev, PCI_STATUS, &status);
		bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
#endif

		/* PCI-PCI bridges get bridge init instead of an IRQ line. */
		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
			pcibios_init_bridge(dev);
			continue;
		}

		/* Program the IRQ routing through the IOSAPIC. */
		iosapic_fixup_irq(ldev->iosapic_obj, dev);
	}

#ifdef FBB_SUPPORT
	/* If all devices on the bus support FBB, enable it. */
	if (fbb_enable) {
		if (bus->parent) {
			u8 control;
			/* Enable on the upstream bridge too. */
			(void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control);
			(void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK);

		} else {
			/* Root bus: nothing upstream to configure. */
		}
		fbb_enable = PCI_COMMAND_FAST_BACK;
	}

	/* Turn on parity/SERR (and FBB if decided above) everywhere. */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		(void) pci_read_config_word(dev, PCI_COMMAND, &status);
		status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable;
		(void) pci_write_config_word(dev, PCI_COMMAND, status);
	}
#endif
}
828
829
/* PA-RISC PCI BIOS hooks for all LBA flavors. */
static struct pci_bios_ops lba_bios_ops = {
	.init =		lba_bios_init,
	.fixup_bus =	lba_fixup_bus,
};
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
/* Legacy (Astro) I/O-port reads: all LBAs share one flat port space
 * decoded at astro_iop_base, so the port number is a direct offset. */
#define LBA_PORT_IN(size, mask) \
static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
{ \
	u##size t; \
	t = READ_REG##size(astro_iop_base + addr); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
/* Legacy (Astro) I/O-port writes.  On pre-rev-3 Elroys a read of the
 * FUNC_ID register is issued afterwards to flush the posted write. */
#define LBA_PORT_OUT(size, mask) \
static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, d, addr, val); \
	WRITE_REG##size(val, astro_iop_base + addr); \
	if (LBA_DEV(d)->hw_rev < 3) \
		lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)
905
906
/* Port-space accessors used on non-PAT (legacy Astro) platforms. */
static struct pci_port_ops lba_astro_port_ops = {
	.inb =	lba_astro_in8,
	.inw =	lba_astro_in16,
	.inl =	lba_astro_in32,
	.outb =	lba_astro_out8,
	.outw =	lba_astro_out16,
	.outl =	lba_astro_out32
};
915
916
917#ifdef CONFIG_64BIT
/* PAT platforms map each LBA's port space into GMMIO: the port number's
 * dword index selects a 4KB page (<<10) and the low 2 bits the byte
 * within the word. */
#define PIOP_TO_GMMIO(lba, addr) \
	((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))
920
921
922
923
924
925
926
927
928
929
930
931
932
/* PAT I/O-port reads: each LBA has its own port space, reached through
 * the per-LBA GMMIO mapping (see PIOP_TO_GMMIO). */
#undef LBA_PORT_IN
#define LBA_PORT_IN(size, mask) \
static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
{ \
	u##size t; \
	DBG_PORT("%s(0x%p, 0x%x) ->", __func__, l, addr); \
	t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)
947
948
/* PAT I/O-port writes; the trailing FUNC_ID read flushes the posted
 * write out of the rope. */
#undef LBA_PORT_OUT
#define LBA_PORT_OUT(size, mask) \
static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
{ \
	void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, l, addr, val); \
	WRITE_REG##size(val, where); \
	/* flush the posted write */ \
	lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)
963
964
/* Port-space accessors used on PAT (PDC-PAT firmware) platforms. */
static struct pci_port_ops lba_pat_port_ops = {
	.inb =	lba_pat_in8,
	.inw =	lba_pat_in16,
	.inl =	lba_pat_in32,
	.outb =	lba_pat_out8,
	.outw =	lba_pat_out16,
	.outl =	lba_pat_out32
};
973
974
975
976
977
978
979
980
981
/* PAT platforms: ask firmware (pdc_pat_cell_module) for this LBA's
 * address ranges in both processor (PA) and PCI (IO) views, and fill in
 * lba_dev->hba's bus-number, LMMIO/ELMMIO, GMMIO and port-space
 * resources accordingly.  The two views are needed to compute the
 * CPU->PCI offset (lmmio_space_offset). */
static void
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	unsigned long bytecnt;
	long io_count;
	long status;	/* PDC return status */
	long pa_count;
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;	/* PA (processor) view */
	pdc_pat_cell_mod_maddr_block_t *io_pdc_cell;	/* IO (PCI) view */
	int i;

	pa_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!pa_pdc_cell)
		return;

	io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!io_pdc_cell) {
		kfree(pa_pdc_cell);
		return;
	}

	/* Query both views; mod[1] holds the number of address ranges. */
	status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				PA_VIEW, pa_pdc_cell);
	pa_count = pa_pdc_cell->mod[1];

	status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				IO_VIEW, io_pdc_cell);
	io_count = io_pdc_cell->mod[1];

	/* Firmware must answer for both views or we can't continue. */
	if (status != PDC_OK) {
		panic("pdc_pat_cell_module() call failed for LBA!\n");
	}

	if (PAT_GET_ENTITY(pa_pdc_cell->mod_info) != PAT_ENTITY_LBA) {
		panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
	}

	/* Walk the (type, start, end) triples starting at mod[2]. */
	for (i = 0; i < pa_count; i++) {
		struct {
			unsigned long type;
			unsigned long start;
			unsigned long end;	/* aka finish */
		} *p, *io;
		struct resource *r;

		p = (void *) &(pa_pdc_cell->mod[2+i*3]);
		io = (void *) &(io_pdc_cell->mod[2+i*3]);

		/* Ignore the "big physical range" flag in the high bits. */
		switch(p->type & 0xff) {
		case PAT_PBNUM:
			lba_dev->hba.bus_num.start = p->start;
			lba_dev->hba.bus_num.end = p->end;
			lba_dev->hba.bus_num.flags = IORESOURCE_BUS;
			break;

		case PAT_LMMIO:
			/* First LMMIO range is distributed, second is directed. */
			if (!lba_dev->hba.lmmio_space.flags) {
				unsigned long lba_len;

				lba_len = ~READ_REG32(lba_dev->hba.base_addr
						+ LBA_LMMIO_MASK);
				/* Firmware range shorter than hardware decode? */
				if ((p->end - p->start) != lba_len)
					p->end = extend_lmmio_len(p->start,
						p->end, lba_len);

				sprintf(lba_dev->hba.lmmio_name,
						"PCI%02x LMMIO",
						(int)lba_dev->hba.bus_num.start);
				/* CPU view minus PCI view = address offset. */
				lba_dev->hba.lmmio_space_offset = p->start -
					io->start;
				r = &lba_dev->hba.lmmio_space;
				r->name = lba_dev->hba.lmmio_name;
			} else if (!lba_dev->hba.elmmio_space.flags) {
				sprintf(lba_dev->hba.elmmio_name,
						"PCI%02x ELMMIO",
						(int)lba_dev->hba.bus_num.start);
				r = &lba_dev->hba.elmmio_space;
				r->name  = lba_dev->hba.elmmio_name;
			} else {
				printk(KERN_WARNING MODULE_NAME
					" only supports 2 LMMIO resources!\n");
				break;
			}

			r->start  = p->start;
			r->end    = p->end;
			r->flags  = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_GMMIO:
			/* >32-bit MMIO window (64-bit BARs). */
			sprintf(lba_dev->hba.gmmio_name, "PCI%02x GMMIO",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.gmmio_space;
			r->name  = lba_dev->hba.gmmio_name;
			r->start  = p->start;
			r->end    = p->end;
			r->flags  = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_NPIOP:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : ignoring NPIOP (0x%lx)\n",
				i, p->start);
			break;

		case PAT_PIOP:
			/* Map the whole 64MB GMMIO-mapped port window
			 * (see PIOP_TO_GMMIO). */
			lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);

			sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.io_space;
			r->name  = lba_dev->hba.io_name;
			r->start  = HBA_PORT_BASE(lba_dev->hba.hba_num);
			r->end    = r->start + HBA_PORT_SPACE_SIZE - 1;
			r->flags  = IORESOURCE_IO;
			r->parent = r->sibling = r->child = NULL;
			break;

		default:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : unknown pat range type (0x%lx)\n",
				i, p->type & 0xff);
			break;
		}
	}

	kfree(pa_pdc_cell);
	kfree(io_pdc_cell);
}
1125#else
1126
1127#define lba_pat_port_ops lba_astro_port_ops
1128#define lba_pat_resources(pa_dev, lba_dev)
1129#endif
1130
1131
1132extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
1133extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
1134
1135
/* Non-PAT platforms: derive this LBA's resources from its own registers
 * and from the SBA's rope configuration (sba_distributed_lmmio /
 * sba_directed_lmmio) rather than from PAT firmware calls. */
static void
lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	struct resource *r;
	int lba_num;

	/* 32-bit PCI addresses get sign-extended into the CPU's view. */
	lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;

	/* Firmware stashes "secondary | subordinate << 8" bus numbers in
	 * the FW_SCRATCH register during boot. */
	lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
	r = &(lba_dev->hba.bus_num);
	r->name = "LBA PCI Busses";
	r->start = lba_num & 0xff;
	r->end = (lba_num>>8) & 0xff;
	r->flags = IORESOURCE_BUS;

	/* Distributed LMMIO range: shared across ropes, carved up by SBA. */
	r = &(lba_dev->hba.lmmio_space);
	sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name  = lba_dev->hba.lmmio_name;

#if 1
	/* Preferred: let the SBA code compute this rope's share. */
	sba_distributed_lmmio(pa_dev, r);
#else
	/* Compiled-out alternative: derive the range from this LBA's own
	 * LMMIO_BASE/LMMIO_MASK registers and the rope number.
	 * NOTE(review): dead code — mmio_mask is not defined in this file. */
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE);
	if (r->start & 1) {
		unsigned long rsize;

		r->flags = IORESOURCE_MEM;
		/* mmio_mask also clears the enable bit (bit 0). */
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
		rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);

		/* Each rope gets an equal slice of the decoded window. */
		rsize /= ROPES_PER_IOC;
		r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa.start);
		r->end = r->start + rsize;
	} else {
		r->end = r->start = 0;	/* decode disabled */
	}
#endif

	/* Directed (ELMMIO) range: a window routed to this rope alone. */
	r = &(lba_dev->hba.elmmio_space);
	sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name  = lba_dev->hba.elmmio_name;

#if 1
	/* Preferred: let the SBA code fill in the directed range. */
	sba_directed_lmmio(pa_dev, r);
#else
	/* Compiled-out alternative using this LBA's ELMMIO registers. */
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);

	/* Bit 0 set means the decoder is enabled. */
	if (r->start & 1) {
		unsigned long rsize;
		r->flags = IORESOURCE_MEM;
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
		rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
		r->end = r->start + ~rsize;
	}
#endif

	/* I/O port space: each LBA owns a fixed-size slot by HBA number. */
	r = &(lba_dev->hba.io_space);
	sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
	r->name  = lba_dev->hba.io_name;
	r->flags = IORESOURCE_IO;
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
	r->end   = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));

	/* Shift into this HBA's slot of the global port-space layout. */
	lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
	r->start |= lba_num;
	r->end   |= lba_num;
}
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
/* One-time hardware bring-up for an LBA: clear any pending bus reset,
 * drop firmware-left SMART mode, enable hard-fail, and make sure PCI
 * arbitration is enabled.  Returns 0 (always succeeds). */
static int __init
lba_hw_init(struct lba_device *d)
{
	u32 stat;
	u32 bus_reset;	/* nonzero if PCI RESET# is still asserted */

#if 0
	/* Compiled-out register dump for bring-up debugging. */
	printk(KERN_DEBUG "LBA %lx  STAT_CTL %Lx  ERROR_CFG %Lx  STATUS %Lx  DMA_CTL %Lx\n",
		d->hba.base_addr,
		READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
		READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
		READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
		READ_REG64(d->hba.base_addr + LBA_DMA_CTL) );
	printk(KERN_DEBUG "	ARB mask %Lx  pri %Lx  mode %Lx  mtlt %Lx\n",
		READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
		READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
		READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
		READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) );
	printk(KERN_DEBUG "	HINT cfg 0x%Lx\n",
		READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
	printk(KERN_DEBUG "	HINT reg ");
	{ int i;
	for (i=LBA_HINT_BASE; i< (14*8 + LBA_HINT_BASE); i+=8)
		printk(" %Lx", READ_REG64(d->hba.base_addr + i));
	}
	printk("\n");
#endif

#ifdef CONFIG_64BIT
	/* No 64-bit-specific setup needed here (GMMIO handled elsewhere). */
#endif

	/* Is firmware still holding the secondary bus in reset? */
	bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
	if (bus_reset) {
		printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n");
	}

	stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
	if (stat & LBA_SMART_MODE) {
		/* SMART mode is only wanted around config probes. */
		printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n");
		stat &= ~LBA_SMART_MODE;
		WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
	}

	/* Enable hard-fail so fatal PCI errors stop the machine. */
	stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
	WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);

	/* Give devices time to come out of reset before we talk to them. */
	if (bus_reset)
		mdelay(pci_post_reset_delay);

	if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
		/* Firmware left arbitration disabled (it only needs the
		 * boot device); enable it for the devices it cares about
		 * plus ours. */
		printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n");
		WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
	}

	return 0;
}
1401
1402
1403
1404
1405
1406
1407
1408
/* Next free PCI bus number across all LBAs; advanced after each scan. */
static unsigned int lba_next_bus = 0;
1410
1411
1412
1413
1414
1415
/* Probe one LBA instance: identify the chip flavor (Elroy vs
 * Mercury/Quicksilver) to pick config-space accessors, register the
 * IOSAPIC, initialize hardware, discover address resources (PAT
 * firmware or legacy registers), then create and scan the PCI root
 * bus.  Returns 0 on success, nonzero/negative on failure. */
static int __init
lba_driver_probe(struct parisc_device *dev)
{
	struct lba_device *lba_dev;
	LIST_HEAD(resources);
	struct pci_bus *lba_bus;
	struct pci_ops *cfg_ops;
	u32 func_class;
	void *tmp_obj;
	char *version;
	void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);
	int max;

	/* Read hardware revision from the function-class register. */
	func_class = READ_REG32(addr + LBA_FCLASS);

	if (IS_ELROY(dev)) {
		func_class &= 0xf;
		switch (func_class) {
		case 0:	version = "TR1.0"; break;
		case 1:	version = "TR2.0"; break;
		case 2:	version = "TR2.1"; break;
		case 3:	version = "TR2.2"; break;
		case 4:	version = "TR3.0"; break;
		case 5:	version = "TR4.0"; break;
		default: version = "TR4+";
		}

		printk(KERN_INFO "Elroy version %s (0x%x) found at 0x%lx\n",
			version, func_class & 0xf, (long)dev->hpa.start);

		if (func_class < 2) {
			printk(KERN_WARNING "Can't support LBA older than "
				"TR2.1 - continuing under adversity.\n");
		}

#if 0
		/* Compiled out: TR4.0 Elroys could use the simpler
		 * mercury-style accessors, but this is not enabled. */
		if (func_class > 4) {
			cfg_ops = &mercury_cfg_ops;
		} else
#endif
		{
			cfg_ops = &elroy_cfg_ops;
		}

	} else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
		int major, minor;

		func_class &= 0xff;
		major = func_class >> 4, minor = func_class & 0xf;

		printk(KERN_INFO "%s version TR%d.%d (0x%x) found at 0x%lx\n",
			IS_MERCURY(dev) ? "Mercury" : "Quicksilver", major,
			minor, func_class, (long)dev->hpa.start);

		cfg_ops = &mercury_cfg_ops;
	} else {
		printk(KERN_ERR "Unknown LBA found at 0x%lx\n",
			(long)dev->hpa.start);
		return -ENODEV;
	}

	/* Each LBA has an integrated IOSAPIC for interrupt routing. */
	tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE);

	lba_dev = kzalloc(sizeof(struct lba_device), GFP_KERNEL);
	if (!lba_dev) {
		printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
		return(1);
	}

	/* Tie the pieces together. */
	lba_dev->hw_rev = func_class;
	lba_dev->hba.base_addr = addr;
	lba_dev->hba.dev = dev;
	lba_dev->iosapic_obj = tmp_obj;
	lba_dev->hba.iommu = sba_get_iommu(dev);
	parisc_set_drvdata(dev, lba_dev);

	pci_bios = &lba_bios_ops;
	pcibios_register_hba(HBA_DATA(lba_dev));
	spin_lock_init(&lba_dev->lba_lock);

	if (lba_hw_init(lba_dev))
		return(1);

	/* Resource discovery differs by firmware generation. */
	if (is_pdc_pat()) {
		/* PAT firmware describes everything via PDC calls. */
		pci_port = &lba_pat_port_ops;
		lba_pat_resources(dev, lba_dev);
	} else {
		if (!astro_iop_base) {
			/* Legacy: one shared flat I/O-port window. */
			astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
			pci_port = &lba_astro_port_ops;
		}

		lba_legacy_resources(dev, lba_dev);
	}

	if (lba_dev->hba.bus_num.start < lba_next_bus)
		lba_dev->hba.bus_num.start = lba_next_bus;

	/* If firmware's LMMIO range collides with an existing claim,
	 * shrink or discard it rather than corrupting the resource tree. */
	if (truncate_pat_collision(&iomem_resource,
				&(lba_dev->hba.lmmio_space))) {
		printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
				(long)lba_dev->hba.lmmio_space.start,
				(long)lba_dev->hba.lmmio_space.end);
		lba_dev->hba.lmmio_space.flags = 0;
	}

	pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
				HBA_PORT_BASE(lba_dev->hba.hba_num));
	if (lba_dev->hba.elmmio_space.flags)
		pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
					lba_dev->hba.lmmio_space_offset);
	if (lba_dev->hba.lmmio_space.flags)
		pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
					lba_dev->hba.lmmio_space_offset);
	if (lba_dev->hba.gmmio_space.flags) {
		/* GMMIO is intentionally not handed to the PCI core here. */
	}

	pci_add_resource(&resources, &lba_dev->hba.bus_num);

	dev->dev.platform_data = lba_dev;
	lba_bus = lba_dev->hba.hba_bus =
		pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
				cfg_ops, NULL, &resources);
	if (!lba_bus) {
		pci_free_resource_list(&resources);
		return 0;
	}

	max = pci_scan_child_bus(lba_bus);

	/* PAT firmware does not pre-assign BARs; do it ourselves. */
	if (is_pdc_pat()) {
		DBG_PAT("LBA pci_bus_size_bridges()\n");
		pci_bus_size_bridges(lba_bus);

		DBG_PAT("LBA pci_bus_assign_resources()\n");
		pci_bus_assign_resources(lba_bus);

#ifdef DEBUG_LBA_PAT
		DBG_PAT("\nLBA PIOP resource tree\n");
		lba_dump_res(&lba_dev->hba.io_space, 2);
		DBG_PAT("\nLBA LMMIO resource tree\n");
		lba_dump_res(&lba_dev->hba.lmmio_space, 2);
#endif
	}

	/* Elroy config probes can wedge the bus; once the initial scan is
	 * done, restrict further config access to known-present devices. */
	if (cfg_ops == &elroy_cfg_ops) {
		lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
	}

	lba_next_bus = max + 1;
	pci_bus_add_devices(lba_bus);

	return 0;
}
1613
/* Hardware IDs this driver binds to: Elroy, Mercury and Quicksilver
 * bridges, any revision. */
static struct parisc_device_id lba_tbl[] = {
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },
	{ 0, }
};
1620
static struct parisc_driver lba_driver = {
	.name =		MODULE_NAME,
	.id_table =	lba_tbl,
	.probe =	lba_driver_probe,
};
1626
1627
1628
1629
1630
/* Register the LBA driver; called once from arch init code. */
void __init lba_init(void)
{
	register_parisc_driver(&lba_driver);
}
1635
1636
1637
1638
1639
1640
/* Program an LBA's DMA interrupt base/mask registers on behalf of the
 * SBA/IOSAPIC code.  Maps the chip registers transiently since this may
 * run before the driver's own mapping exists. */
void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
{
	void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096);

	imask <<= 2;	/* adjust for hints - 2 registers per entry */

	/* Both values must be 2MB aligned. */
	WARN_ON((ibase & 0x001fffff) != 0);
	WARN_ON((imask & 0x001fffff) != 0);

	DBG("%s() ibase 0x%x imask 0x%x\n", __func__, ibase, imask);
	/* Mask first so no stray DMA matches a half-programmed base. */
	WRITE_REG32( imask, base_addr + LBA_IMASK);
	WRITE_REG32( ibase, base_addr + LBA_IBASE);
	iounmap(base_addr);
}
1656
1657