/*
 * PCI Lower Bus Adapter (LBA) manager for HP PA-RISC systems.
 *
 * Provides PCI config-space, I/O-port and MMIO access for buses behind
 * the Elroy, Mercury and QuickSilver host bridges, on both legacy
 * (Astro) and PDC PAT firmware platforms.
 */

#include <linux/delay.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/page.h>

#include <asm/ropes.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>
#include <asm/io.h>

#undef DEBUG_LBA	/* general debugging */
#undef DEBUG_LBA_PORT	/* debug I/O port accesses */
#undef DEBUG_LBA_CFG	/* debug config-space accesses */
#undef DEBUG_LBA_PAT	/* debug PDC PAT resource management */

#undef FBB_SUPPORT	/* fast back-to-back support: stale, see below */


#ifdef DEBUG_LBA
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif

#ifdef DEBUG_LBA_PORT
#define DBG_PORT(x...) printk(x)
#else
#define DBG_PORT(x...)
#endif

#ifdef DEBUG_LBA_CFG
#define DBG_CFG(x...) printk(x)
#else
#define DBG_CFG(x...)
#endif

#ifdef DEBUG_LBA_PAT
#define DBG_PAT(x...) printk(x)
#else
#define DBG_PAT(x...)
#endif


#define MODULE_NAME "LBA"

/* Non-postable I/O port space: one dense window for all legacy LBAs. */
#define LBA_PORT_BASE	(PCI_F_EXTEND | 0xfee00000UL)
static void __iomem *astro_iop_base __read_mostly;

/* Scratch variable used to sink reads that flush posted writes. */
static u32 lba_t32;

/* lba flags */
#define LBA_FLAG_SKIP_PROBE	0x10	/* skip probing of devices */

#define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE)


/* Fetch the lba_device hanging off a device's platform data. */
#define LBA_DEV(d) ({				\
	void *__pdata = d;			\
	BUG_ON(!__pdata);			\
	(struct lba_device *)__pdata; })

/*
 * Only allow 8 subsidiary buses per LBA: the PCI bus numbering space
 * is shared globally across all LBAs.
 */
#define LBA_MAX_NUM_BUSES 8

/* Raw register accessors (no byte swap, no barriers). */
#define READ_U8(addr)  __raw_readb(addr)
#define READ_U16(addr) __raw_readw(addr)
#define READ_U32(addr) __raw_readl(addr)
#define WRITE_U8(value, addr)  __raw_writeb(value, addr)
#define WRITE_U16(value, addr) __raw_writew(value, addr)
#define WRITE_U32(value, addr) __raw_writel(value, addr)

/* Little-endian register accessors. */
#define READ_REG8(addr)  readb(addr)
#define READ_REG16(addr) readw(addr)
#define READ_REG32(addr) readl(addr)
#define READ_REG64(addr) readq(addr)
#define WRITE_REG8(value, addr)  writeb(value, addr)
#define WRITE_REG16(value, addr) writew(value, addr)
#define WRITE_REG32(value, addr) writel(value, addr)
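
/*
 * LBA register writes are posted: nothing guarantees a write reached
 * the chip until something reads it back.  The config and port-space
 * paths below therefore follow critical writes with a dummy read
 * (parked in lba_t32) from the same LBA.
 */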

/* Config-space token: bus number in bits 23:16, devfn in bits 15:8. */
#define LBA_CFG_TOK(bus, dfn) ((u32) ((bus)<<16 | (dfn)<<8))
#define LBA_CFG_BUS(tok)  ((u8) ((tok)>>16))
#define LBA_CFG_DEV(tok)  ((u8) ((tok)>>11) & 0x1f)
#define LBA_CFG_FUNC(tok) ((u8) ((tok)>>8 ) & 0x7)


/* Rope topology: which of the IOC's eight ropes this LBA hangs off. */
#define ROPES_PER_IOC	8
#define LBA_NUM(x)    ((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1))
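
/*
 * Worked example: a device at bus 1, slot 2, function 0 gives
 * LBA_CFG_TOK(1, PCI_DEVFN(2, 0)) == 0x00011000; LBA_CFG_BUS(),
 * LBA_CFG_DEV() and LBA_CFG_FUNC() recover 0x01, 0x02 and 0x0 from
 * that token.
 */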


/* Recursively dump a resource tree for debugging. */
static void
lba_dump_res(struct resource *r, int d)
{
	int i;

	if (!r)
		return;

	printk(KERN_DEBUG "(%p)", r->parent);
	for (i = d; i ; --i) printk(" ");
	printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r,
		(long)r->start, (long)r->end, r->flags);
	lba_dump_res(r->child, d+2);
	lba_dump_res(r->sibling, d);
}


/*
 * Sanity check: a config token is only worth probing if its bus number
 * falls inside the range this LBA claims.
 */
static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d)
{
	u8 first_bus = d->hba.hba_bus->busn_res.start;
	u8 last_sub_bus = d->hba.hba_bus->busn_res.end;

	if ((bus < first_bus) ||
	    (bus > last_sub_bus) ||
	    ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) {
		return 0;
	}

	return 1;
}


/*
 * Prepare the LBA for a probing config cycle: save the error-config,
 * status-control and arbitration-mask registers, park the arbiter
 * (grant only ourselves) and enable "smart" mode so a master abort
 * while probing doesn't take the whole bus down.
 */
#define LBA_CFG_SETUP(d, tok) {					\
	/* Save contents of error config register. */		\
	error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);	\
								\
	/* Save contents of status control register. */		\
	status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);	\
								\
	/* Save contents of arb mask register. */		\
	arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK);	\
								\
	/* Grant the bus to ourselves only while probing. */	\
	WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK);	\
								\
	/* Smart mode: don't go fatal on a master abort. */	\
	WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG);	\
}

/*
 * Probe for a device by writing ~0 to its vendor-ID register (which is
 * read-only, so the write is harmless).  If nothing answers, the chip
 * logs a master abort.  Reads of LBA_PCI_CFG_ADDR in between flush the
 * posted writes.
 */
#define LBA_CFG_PROBE(d, tok) {					\
	/* Select the vendor-ID register of the target device. */ \
	WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
	/* Read to flush the posted address write. */		\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
	/* Generate the config cycle. */			\
	WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA);	\
	/* Flush again; an absent device leaves a master abort logged. */ \
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
}


/* LBA_ERROR_STATUS bits of interest after a config probe. */
#define LBA_MASTER_ABORT_ERROR	0xc
#define LBA_FATAL_ERROR		0x10

/*
 * Did the last config cycle master-abort?  Snapshot the error log and,
 * unless the error was fatal, clear it again.
 */
#define LBA_CFG_MASTER_ABORT_CHECK(d, base, tok, error) {	\
	u32 error_status = 0;					\
	/* Set clear enable (CLEAR_ERRLOG_ENABLE) before reading the log. */ \
	WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL); \
	error_status = READ_REG32(base + LBA_ERROR_STATUS);	\
	if ((error_status & 0x1f) != 0) {			\
		/* The probe failed: a master abort was logged. */ \
		error = 1;					\
		if ((error_status & LBA_FATAL_ERROR) == 0) {	\
			/* Clear the non-fatal error log. */	\
			WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL); \
		}						\
	}							\
}

#define LBA_CFG_TR4_ADDR_SETUP(d, addr) \
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);

#define LBA_CFG_ADDR_SETUP(d, addr) {					\
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
	/* Read the address register back to flush the posted write. */ \
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
}

/* Undo LBA_CFG_SETUP(): put all three saved registers back. */
#define LBA_CFG_RESTORE(d, base) {				\
	/* Restore status control register (clear enable off). */ \
	WRITE_REG32(status_control, base + LBA_STAT_CTL);	\
	/* Restore error config register (smart mode off). */	\
	WRITE_REG32(error_config, base + LBA_ERROR_CONFIG);	\
	/* Restore arb mask register (re-enable arbitration). */ \
	WRITE_REG32(arb_mask, base + LBA_ARB_MASK);		\
}
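
/*
 * A "safe" Elroy config read composes the macros above:
 * LBA_CFG_SETUP() parks the arbiter and enables smart mode,
 * LBA_CFG_PROBE() pokes the target's vendor-ID register,
 * LBA_CFG_MASTER_ABORT_CHECK() inspects the error log to see whether
 * anything answered, and LBA_CFG_RESTORE() puts the saved state back.
 * lba_rd_cfg() below is exactly that sequence.
 */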


static unsigned int
lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
{
	u32 data = ~0U;
	int error = 0;
	u32 arb_mask = 0;
	u32 error_config = 0;
	u32 status_control = 0;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_PROBE(d, tok);
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	if (!error) {
		void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

		LBA_CFG_ADDR_SETUP(d, tok | reg);
		switch (size) {
		case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break;
		case 2: data = (u32) READ_REG16(data_reg + (reg & 2)); break;
		case 4: data = READ_REG32(data_reg); break;
		}
	}
	LBA_CFG_RESTORE(d, d->hba.base_addr);
	return data;
}


static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	/* FIXME: always take the slow-but-safe lba_rd_cfg() path.  The
	 * "if (!LBA_SKIP_PROBE(d))" guard is deliberately omitted as a
	 * workaround, which leaves the direct-access code below
	 * unreachable.
	 */
	{
		*data = lba_rd_cfg(d, tok, pos, size);
		DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __func__, tok, pos, *data);
		return 0;
	}

	if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->busn_res.start, devfn, d)) {
		DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __func__, tok, pos);
		/* Either don't want to look or know the device isn't there. */
		*data = ~0U;
		return 0;
	}

	/* Basic algorithm: direct config cycle. */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1: *data = READ_REG8 (data_reg + (pos & 3)); break;
	case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
	case 4: *data = READ_REG32(data_reg); break;
	}
	DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __func__, tok, pos, *data);
	return 0;
}


/* Generate a config write cycle the slow-but-safe way. */
static void
lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
{
	int error = 0;
	u32 arb_mask = 0;
	u32 error_config = 0;
	u32 status_control = 0;
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_ADDR_SETUP(d, tok | reg);
	switch (size) {
	case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break;
	case 2: WRITE_REG16(data, data_reg + (reg & 2)); break;
	case 4: WRITE_REG32(data, data_reg); break;
	}
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	LBA_CFG_RESTORE(d, d->hba.base_addr);
}


/*
 * Config writes must not stay posted: a read of the config address
 * register after the write forces the cycle out to the bus.
 */
static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	if (!LBA_SKIP_PROBE(d)) {
		/* Generate the cycle the slow-but-safe way. */
		lba_wr_cfg(d, tok, pos, (u32) data, size);
		DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __func__, tok, pos, data);
		return 0;
	}

	if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->busn_res.start, devfn, d))) {
		DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __func__, tok, pos, data);
		return 1; /* don't write to devices we know aren't there */
	}

	DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __func__, tok, pos, data);

	/* Basic algorithm: direct config cycle. */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
		break;
	case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
		break;
	case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
		break;
	}
	/* Flush posted write. */
	lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}


static struct pci_ops elroy_cfg_ops = {
	.read =		elroy_cfg_read,
	.write =	elroy_cfg_write,
};
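
/*
 * These ops are handed to pci_create_root_bus() in lba_driver_probe()
 * below; every config access on buses behind an Elroy funnels through
 * them.
 */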


/*
 * Mercury and QuickSilver (TR4.0+) don't need the Elroy probe/restore
 * dance; config cycles can be generated directly.
 */
static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1:
		*data = READ_REG8(data_reg + (pos & 3));
		break;
	case 2:
		*data = READ_REG16(data_reg + (pos & 2));
		break;
	case 4:
		*data = READ_REG32(data_reg);
		break;
	}

	DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data);
	return 0;
}


/*
 * Config writes are posted; reading the config address register
 * afterwards forces the cycle to complete before we return.
 */
static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __func__, tok, pos, data);

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1:
		WRITE_REG8 (data, data_reg + (pos & 3));
		break;
	case 2:
		WRITE_REG16(data, data_reg + (pos & 2));
		break;
	case 4:
		WRITE_REG32(data, data_reg);
		break;
	}

	/* Flush posted write. */
	lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}

static struct pci_ops mercury_cfg_ops = {
	.read =		mercury_cfg_read,
	.write =	mercury_cfg_write,
};


static void
lba_bios_init(void)
{
	DBG(MODULE_NAME ": lba_bios_init\n");
}


#ifdef CONFIG_64BIT

/*
 * truncate_pat_collision: deal with overlaps between ranges reported
 * by PAT PDC and the existing iomem tree.  Broken PA8800 firmware can
 * report an lmmio range that overlaps something already in use (e.g.
 * a CPU HPA); shrink the new range around the conflict.
 *
 * Returns nonzero if the range is entirely in use; on success, *new
 * may have been truncated.  Only one collision per range is handled.
 */
static unsigned long
truncate_pat_collision(struct resource *root, struct resource *new)
{
	unsigned long start = new->start;
	unsigned long end = new->end;
	struct resource *tmp = root->child;

	if (end <= start || start < root->start || !tmp)
		return 0;

	/* Find the first resource that overlaps or follows [start,end]. */
	while (tmp && tmp->end < start)
		tmp = tmp->sibling;

	/* No entries overlap. */
	if (!tmp)
		return 0;

	/* The first candidate starts past our end: no conflict. */
	if (tmp->start >= end)
		return 0;

	if (tmp->start <= start) {
		/* The "front" of the new range is in use; bump the start. */
		new->start = tmp->end + 1;

		if (tmp->end >= end) {
			/* The range is entirely in use: tell the caller. */
			return 1;
		}
	}

	if (tmp->end < end) {
		/* The "tail" of the new range is in use; pull the end in. */
		new->end = tmp->start - 1;
	}

	printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] "
					"to [%lx,%lx]\n",
			start, end,
			(long)new->start, (long)new->end );

	return 0;	/* truncation successful */
}


/*
 * extend_lmmio_len: extend the lmmio range to its maximum length.
 * Needed at least on C8000-class systems (mako CPUs and later);
 * other machines keep the range exactly as PAT reported it.
 */
static unsigned long
extend_lmmio_len(unsigned long start, unsigned long end, unsigned long lba_len)
{
	struct resource *tmp;

	/* Exit if this isn't a mako-class (PA8800/PA8900) machine. */
	if (boot_cpu_data.cpu_type < mako)
		return end;

	pr_debug("LMMIO mismatch: PAT length = 0x%lx, MASK register = 0x%lx\n",
		end - start, lba_len);

	lba_len = min(lba_len+1, 256UL*1024*1024); /* limit to 256 MB */

	pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - original\n", start, end);

	end += lba_len;
	if (end < start) /* fix overflow */
		end = -1ULL;

	pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - current\n", start, end);

	/* Shrink the extension back if it runs into an existing range. */
	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
		pr_debug("LBA: testing %pR\n", tmp);
		if (tmp->start == start)
			continue; /* ignore ourself */
		if (tmp->end < start)
			continue;
		if (tmp->start > end)
			continue;
		if (end >= tmp->start)
			end = tmp->start - 1;
	}

	pr_info("LBA: lmmio_space [0x%lx-0x%lx] - new\n", start, end);

	/* Return the new end. */
	return end;
}

#else
#define truncate_pat_collision(r,n)  (0)
#endif

static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
{
	int idx;
	struct resource *r;

	for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
		r = &dev->resource[idx];
		if (!r->flags)
			continue;
		if (r->parent)	/* already allocated */
			continue;
		if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
			/*
			 * Something is wrong with the region: invalidate
			 * the resource to prevent child allocations in
			 * this range.
			 */
			r->start = r->end = 0;
			r->flags = 0;
		}
	}
}

static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *child;

	/* Depth-first: claim our bridge windows, then the children's. */
	if (bus->self)
		pcibios_allocate_bridge_resources(bus->self);
	list_for_each_entry(child, &bus->children, node)
		pcibios_allocate_bus_resources(child);
}


/*
 * Fix up the given bus: claim resources, honor the HBA's address
 * windows and hook each device's IRQ up to the I/O SAPIC.  Called by
 * the PCI core for the root bus and again for each child bus found.
 */
static void
lba_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
#ifdef FBB_SUPPORT
	u16 status;
#endif
	struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));

	DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
		bus, (int)bus->busn_res.start, bus->bridge->platform_data);

	/*
	 * Behind a PPB, read the bridge windows; on the root bus, claim
	 * the HBA's own windows in the global resource trees.
	 */
	if (bus->parent) {
		/* PCI-PCI Bridge */
		pci_read_bridge_bases(bus);

		pcibios_allocate_bus_resources(bus);
	} else {
		/* Host-PCI Bridge */
		int err;

		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.io_space.name,
			ldev->hba.io_space.start, ldev->hba.io_space.end,
			ldev->hba.io_space.flags);
		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.lmmio_space.name,
			ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
			ldev->hba.lmmio_space.flags);

		err = request_resource(&ioport_resource, &(ldev->hba.io_space));
		if (err < 0) {
			lba_dump_res(&ioport_resource, 2);
			BUG();
		}

		if (ldev->hba.elmmio_space.flags) {
			err = request_resource(&iomem_resource,
					&(ldev->hba.elmmio_space));
			if (err < 0) {
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"elmmio_space [%lx/%lx]\n",
					(long)ldev->hba.elmmio_space.start,
					(long)ldev->hba.elmmio_space.end);

				/* Not fatal: carry on without the window. */
			}
		}

		if (ldev->hba.lmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
			if (err < 0) {
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"lmmio_space [%lx/%lx]\n",
					(long)ldev->hba.lmmio_space.start,
					(long)ldev->hba.lmmio_space.end);
			}
		}

#ifdef CONFIG_64BIT
		/* GMMIO is only reported on PAT platforms. */
		if (ldev->hba.gmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
			if (err < 0) {
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"gmmio_space [%lx/%lx]\n",
					(long)ldev->hba.gmmio_space.start,
					(long)ldev->hba.gmmio_space.end);
				lba_dump_res(&iomem_resource, 2);
				BUG();
			}
		}
#endif

	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		DBG("lba_fixup_bus() %s\n", pci_name(dev));

		/* Virtualize device I/O and MMIO resources. */
		for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
			struct resource *res = &dev->resource[i];

			/* If resource not allocated - skip it. */
			if (!res->start)
				continue;

			/* Claim it in the matching HBA window. */
			pci_claim_resource(dev, i);
		}

#ifdef FBB_SUPPORT
		/*
		 * If one device doesn't support FBB transfers, the bus
		 * can't use them either.
		 */
		(void) pci_read_config_word(dev, PCI_STATUS, &status);
		bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
#endif

		/* PPBs are handled by the generic bridge init. */
		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
			pcibios_init_bridge(dev);
			continue;
		}

		/* Adjust the INT line IRQ. */
		iosapic_fixup_irq(ldev->iosapic_obj, dev);
	}

#ifdef FBB_SUPPORT
	/* FIXME: stale, never built (FBB_SUPPORT is #undef'd above) and
	 * incomplete: fbb_enable is not declared anywhere in this file.
	 */
	if (fbb_enable) {
		if (bus->parent) {
			u8 control;

			/* Enable on PPB. */
			(void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control);
			(void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK);

		} else {
			/* Enable on LBA. */
		}
		fbb_enable = PCI_COMMAND_FAST_BACK;
	}

	/* Lastly enable FBB/PERR/SERR on all devices too. */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		(void) pci_read_config_word(dev, PCI_COMMAND, &status);
		status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable;
		(void) pci_write_config_word(dev, PCI_COMMAND, status);
	}
#endif
}


static struct pci_bios_ops lba_bios_ops = {
	.init =		lba_bios_init,
	.fixup_bus =	lba_fixup_bus,
};


/*
 * I/O port space accessors for legacy (Astro) platforms: all LBAs
 * share one dense 64 KB port window mapped at astro_iop_base.
 */

#define LBA_PORT_IN(size, mask) \
static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
{ \
	u##size t; \
	DBG_PORT("%s(0x%p, 0x%x) ->", __func__, d, addr); \
	t = READ_REG##size(astro_iop_base + addr); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)


/*
 * Early Elroy revisions (hw_rev < 3) need port-space writes flushed
 * with a read from the same chip; the LBA_FUNC_ID readback below
 * forces the posted write out.
 */
#define LBA_PORT_OUT(size, mask) \
static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, d, addr, val); \
	WRITE_REG##size(val, astro_iop_base + addr); \
	if (LBA_DEV(d)->hw_rev < 3) \
		lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


static struct pci_port_ops lba_astro_port_ops = {
	.inb =	lba_astro_in8,
	.inw =	lba_astro_in16,
	.inl =	lba_astro_in32,
	.outb =	lba_astro_out8,
	.outw =	lba_astro_out16,
	.outl =	lba_astro_out32
};
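
/*
 * On legacy (Astro) platforms lba_driver_probe() points the global
 * pci_port at this table, so the arch inb()/outb() family dispatches
 * through these accessors.
 */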


#ifdef CONFIG_64BIT
#define PIOP_TO_GMMIO(lba, addr) \
	((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))
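
/*
 * PIOP address math: each naturally aligned 4-byte group of port
 * addresses gets its own 4 KB page in GMMIO space ((addr & 0xFFFC)
 * shifted left by 10), with the low two bits picking the byte within
 * the group.  E.g. port 0x3f8 maps to iop_base + 0xfe000 and port
 * 0x3fc to iop_base + 0xff000.
 */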

/*
 * I/O port space accessors for PAT platforms: each LBA has its own
 * sparse port window in GMMIO space.
 */

#undef LBA_PORT_IN
#define LBA_PORT_IN(size, mask) \
static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
{ \
	u##size t; \
	DBG_PORT("%s(0x%p, 0x%x) ->", __func__, l, addr); \
	t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)


#undef LBA_PORT_OUT
#define LBA_PORT_OUT(size, mask) \
static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
{ \
	void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, l, addr, val); \
	WRITE_REG##size(val, where); \
	/* Flush the write at least as far as the Elroy. */ \
	lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


static struct pci_port_ops lba_pat_port_ops = {
	.inb =	lba_pat_in8,
	.inw =	lba_pat_in16,
	.inl =	lba_pat_in32,
	.outb =	lba_pat_out8,
	.outw =	lba_pat_out16,
	.outl =	lba_pat_out32
};
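
/*
 * Unlike the Astro accessors above, these dispatch through the per-LBA
 * iop_base that lba_pat_resources() ioremaps from the PAT_PIOP range,
 * so every LBA gets its own port-space window.
 */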


/*
 * Make PAT PDC range information available to the PCI subsystem:
 * query the cell module in both the PA and I/O views and translate
 * each reported range into the matching pci_hba_data resource.
 */
static void
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	unsigned long bytecnt;
	long io_count;
	long status;	/* PDC return status */
	long pa_count;
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;	/* PA view */
	pdc_pat_cell_mod_maddr_block_t *io_pdc_cell;	/* IO view */
	int i;

	pa_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!pa_pdc_cell)
		return;

	io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!io_pdc_cell) {
		kfree(pa_pdc_cell);
		return;
	}

	/* Get the module info in both views; mod[1] is the range count. */
	status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				PA_VIEW, pa_pdc_cell);
	pa_count = pa_pdc_cell->mod[1];

	status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				IO_VIEW, io_pdc_cell);
	io_count = io_pdc_cell->mod[1];

	/* Sanity-check both PDC calls and the entity type. */
	if (status != PDC_OK) {
		panic("pdc_pat_cell_module() call failed for LBA!\n");
	}

	if (PAT_GET_ENTITY(pa_pdc_cell->mod_info) != PAT_ENTITY_LBA) {
		panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
	}

	/*
	 * Each triple in mod[] starting at index 2 describes one range:
	 * type, start, end.
	 */
	for (i = 0; i < pa_count; i++) {
		struct {
			unsigned long type;
			unsigned long start;
			unsigned long end;
		} *p, *io;
		struct resource *r;

		p = (void *) &(pa_pdc_cell->mod[2+i*3]);
		io = (void *) &(io_pdc_cell->mod[2+i*3]);

		/* Convert the PAT range type into a resource. */
		switch(p->type & 0xff) {
		case PAT_PBNUM:
			/* PCI bus number range. */
			lba_dev->hba.bus_num.start = p->start;
			lba_dev->hba.bus_num.end   = p->end;
			lba_dev->hba.bus_num.flags = IORESOURCE_BUS;
			break;

		case PAT_LMMIO:
			/* Used to fix up pre-initialized MEM BARs. */
			if (!lba_dev->hba.lmmio_space.flags) {
				unsigned long lba_len;

				lba_len = ~READ_REG32(lba_dev->hba.base_addr
						+ LBA_LMMIO_MASK);
				if ((p->end - p->start) != lba_len)
					p->end = extend_lmmio_len(p->start,
						p->end, lba_len);

				sprintf(lba_dev->hba.lmmio_name,
						"PCI%02x LMMIO",
						(int)lba_dev->hba.bus_num.start);
				lba_dev->hba.lmmio_space_offset = p->start -
					io->start;
				r = &lba_dev->hba.lmmio_space;
				r->name = lba_dev->hba.lmmio_name;
			} else if (!lba_dev->hba.elmmio_space.flags) {
				sprintf(lba_dev->hba.elmmio_name,
						"PCI%02x ELMMIO",
						(int)lba_dev->hba.bus_num.start);
				r = &lba_dev->hba.elmmio_space;
				r->name = lba_dev->hba.elmmio_name;
			} else {
				printk(KERN_WARNING MODULE_NAME
					" only supports 2 LMMIO resources!\n");
				break;
			}

			r->start  = p->start;
			r->end    = p->end;
			r->flags  = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_GMMIO:
			/* MMIO above 4 GB, for 64-bit BARs. */
			sprintf(lba_dev->hba.gmmio_name, "PCI%02x GMMIO",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.gmmio_space;
			r->name = lba_dev->hba.gmmio_name;
			r->start  = p->start;
			r->end    = p->end;
			r->flags  = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_NPIOP:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : ignoring NPIOP (0x%lx)\n",
				i, p->start);
			break;

		case PAT_PIOP:
			/*
			 * Map the 64 MB PIOP window; the lba_pat_in/out
			 * accessors dispatch through it via
			 * PIOP_TO_GMMIO().
			 */
			lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);

			sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.io_space;
			r->name = lba_dev->hba.io_name;
			r->start  = HBA_PORT_BASE(lba_dev->hba.hba_num);
			r->end    = r->start + HBA_PORT_SPACE_SIZE - 1;
			r->flags  = IORESOURCE_IO;
			r->parent = r->sibling = r->child = NULL;
			break;

		default:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : unknown pat range type (0x%lx)\n",
				i, p->type & 0xff);
			break;
		}
	}

	kfree(pa_pdc_cell);
	kfree(io_pdc_cell);
}
#else
#define lba_pat_port_ops lba_astro_port_ops
#define lba_pat_resources(pa_dev, lba_dev)
#endif
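
/*
 * On 32-bit kernels PAT PDC is never in play (PAT firmware implies a
 * 64-bit platform), so the PAT entry points simply alias the Astro
 * ones and lba_pat_resources() compiles away.
 */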


extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
extern void sba_directed_lmmio(struct parisc_device *, struct resource *);

/*
 * Resource discovery on legacy (non-PAT) firmware: bus numbers come
 * from LBA_FW_SCRATCH, and the MMIO/port windows from the SBA and the
 * LBA's own registers.
 */
static void
lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	struct resource *r;
	int lba_num;

	lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;

	/*
	 * With "legacy" firmware the low byte of LBA_FW_SCRATCH holds
	 * the secondary bus number and the next byte the subordinate
	 * one (the highest PCI bus number below this LBA).
	 */
	lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
	r = &(lba_dev->hba.bus_num);
	r->name = "LBA PCI Busses";
	r->start = lba_num & 0xff;
	r->end = (lba_num >> 8) & 0xff;
	r->flags = IORESOURCE_BUS;

	/* Set up local PCI bus resources. */
	r = &(lba_dev->hba.lmmio_space);
	sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.lmmio_name;

#if 1
	/*
	 * Ask the SBA how the distributed LMMIO range is carved up
	 * among the ropes; it fills in this resource.
	 */
	sba_distributed_lmmio(pa_dev, r);
#else
	/*
	 * Old scheme, dead code kept for reference: read LBA_LMMIO_BASE
	 * and LBA_LMMIO_MASK directly and carve the distributed window
	 * up per rope.  (Note: mmio_mask is not defined in this file.)
	 */
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE);
	if (r->start & 1) {
		unsigned long rsize;

		r->flags = IORESOURCE_MEM;
		/* mmio_mask also clears the LSB "enable" bit. */
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
		rsize = ~READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);

		/* Each rope only gets part of the distributed range. */
		rsize /= ROPES_PER_IOC;
		r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa.start);
		r->end = r->start + rsize;
	} else {
		r->end = r->start = 0;	/* not enabled */
	}
#endif

	/*
	 * "Directed" ranges: a second LMMIO window the SBA routes to
	 * just this rope.  The SBA fills in the resource if one exists.
	 */
	r = &(lba_dev->hba.elmmio_space);
	sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.elmmio_name;

#if 1
	sba_directed_lmmio(pa_dev, r);
#else
	/* Old scheme, dead code kept for reference. */
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);

	if (r->start & 1) {
		unsigned long rsize;

		r->flags = IORESOURCE_MEM;
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
		rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
		r->end = r->start + ~rsize;
	}
#endif

	r = &(lba_dev->hba.io_space);
	sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.io_name;
	r->flags = IORESOURCE_IO;
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
	r->end = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));

	/* Virtualize the I/O port space ranges. */
	lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
	r->start |= lba_num;
	r->end   |= lba_num;
}
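
/*
 * The |= of HBA_PORT_BASE() above relies on each HBA owning a disjoint,
 * aligned chunk of global I/O port space (HBA_PORT_SPACE_SIZE bytes
 * per HBA), so OR-ing in the base is equivalent to adding it.
 */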


/*
 * LBA hardware initialization: sanity-check the chip state left by
 * firmware (bus reset, SMART mode, arbitration) and set the HardFail
 * policy.
 */
static int __init
lba_hw_init(struct lba_device *d)
{
	u32 stat;
	u32 bus_reset;	/* PCI bus reset still asserted? */

#if 0
	printk(KERN_DEBUG "LBA %lx  STAT_CTL %Lx  ERROR_CFG %Lx  STATUS %Lx DMA_CTL %Lx\n",
		d->hba.base_addr,
		READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
		READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
		READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
		READ_REG64(d->hba.base_addr + LBA_DMA_CTL) );
	printk(KERN_DEBUG "	ARB mask %Lx  pri %Lx  mode %Lx  mtlt %Lx\n",
		READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
		READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
		READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
		READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) );
	printk(KERN_DEBUG "	HINT cfg 0x%Lx\n",
		READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
	printk(KERN_DEBUG "	HINT reg ");
	{ int i;
	for (i=LBA_HINT_BASE; i< (14*8 + LBA_HINT_BASE); i+=8)
		printk(" %Lx", READ_REG64(d->hba.base_addr + i));
	}
	printk("\n");
#endif

#ifdef CONFIG_64BIT
	/* (Reserved: no 64-bit-specific setup is done here yet.) */
#endif

	/* PCI bus reset still asserted?  Firmware should have cleared it. */
	bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
	if (bus_reset) {
		printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n");
	}

	stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
	if (stat & LBA_SMART_MODE) {
		printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n");
		stat &= ~LBA_SMART_MODE;
		WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
	}

	/*
	 * HardFail policy: with HF enabled, a fatal PCI error halts the
	 * machine instead of soft-failing (returning ~0 data).  The
	 * default here is soft fail.
	 */
	stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
#if defined(ENABLE_HARDFAIL)
	WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
#else
	WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
#endif

	/* Give the PCI bus time to settle after deasserting reset. */
	if (bus_reset)
		mdelay(pci_post_reset_delay);

	if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
		/*
		 * Some firmware leaves the arbitration mask cleared;
		 * re-enable arbitration (mask 0x3) so devices can
		 * master the bus.
		 */
		printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n");
		WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
	}

	/*
	 * Hint registers keep the defaults programmed by firmware;
	 * nothing is reprogrammed here.
	 */
	return 0;
}


/*
 * Firmware bus numbering can't be trusted across multiple LBAs, so
 * track the next free PCI bus number ourselves.
 */
static unsigned int lba_next_bus;


static int __init
lba_driver_probe(struct parisc_device *dev)
{
	struct lba_device *lba_dev;
	LIST_HEAD(resources);
	struct pci_bus *lba_bus;
	struct pci_ops *cfg_ops;
	u32 func_class;
	void *tmp_obj;
	char *version;
	void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);
	int max;

	/* Read the HW revision first to pick the config-space ops. */
	func_class = READ_REG32(addr + LBA_FCLASS);

	if (IS_ELROY(dev)) {
		func_class &= 0xf;
		switch (func_class) {
		case 0:	version = "TR1.0"; break;
		case 1:	version = "TR2.0"; break;
		case 2:	version = "TR2.1"; break;
		case 3:	version = "TR2.2"; break;
		case 4:	version = "TR3.0"; break;
		case 5:	version = "TR4.0"; break;
		default: version = "TR4+";
		}

		printk(KERN_INFO "Elroy version %s (0x%x) found at 0x%lx\n",
			version, func_class & 0xf, (long)dev->hpa.start);

		if (func_class < 2) {
			printk(KERN_WARNING "Can't support LBA older than "
				"TR2.1 - continuing under adversity.\n");
		}

#if 0
		/* Disabled: TR4+ Elroys could in principle use the
		 * Mercury config path, but this was left off.
		 */
		if (func_class > 4) {
			cfg_ops = &mercury_cfg_ops;
		} else
#endif
		{
			cfg_ops = &elroy_cfg_ops;
		}

	} else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
		int major, minor;

		func_class &= 0xff;
		major = func_class >> 4;
		minor = func_class & 0xf;

		printk(KERN_INFO "%s version TR%d.%d (0x%x) found at 0x%lx\n",
			IS_MERCURY(dev) ? "Mercury" : "Quicksilver", major,
			minor, func_class, (long)dev->hpa.start);

		cfg_ops = &mercury_cfg_ops;
	} else {
		printk(KERN_ERR "Unknown LBA found at 0x%lx\n",
			(long)dev->hpa.start);
		return -ENODEV;
	}

	/* Tell the I/O SAPIC driver we have an IRQ handler/region. */
	tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE);

	lba_dev = kzalloc(sizeof(struct lba_device), GFP_KERNEL);
	if (!lba_dev) {
		printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
		return -ENOMEM;
	}


	/* ---------- First : initialize data we already have --------- */

	lba_dev->hw_rev = func_class;
	lba_dev->hba.base_addr = addr;
	lba_dev->hba.dev = dev;
	lba_dev->iosapic_obj = tmp_obj;		/* save the IOSAPIC object */
	lba_dev->hba.iommu = sba_get_iommu(dev);	/* get iommu data */
	parisc_set_drvdata(dev, lba_dev);

	/* ------------ Second : initialize the chip ------------------ */

	pci_bios = &lba_bios_ops;
	pcibios_register_hba(HBA_DATA(lba_dev));
	spin_lock_init(&lba_dev->lba_lock);

	if (lba_hw_init(lba_dev))
		return -ENODEV;

	/* ---------- Third : setup I/O Port and MMIO resources ------- */

	if (is_pdc_pat()) {
		/* PDC PAT firmware uses the PIOP region of GMMIO space. */
		pci_port = &lba_pat_port_ops;
		/* Go ask PDC PAT how resources are routed. */
		lba_pat_resources(dev, lba_dev);
	} else {
		if (!astro_iop_base) {
			/* All legacy LBAs share one dense port window. */
			astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
			pci_port = &lba_astro_port_ops;
		}

		/* Poke the chip a bit for /proc output. */
		lba_legacy_resources(dev, lba_dev);
	}

	if (lba_dev->hba.bus_num.start < lba_next_bus)
		lba_dev->hba.bus_num.start = lba_next_bus;

	/*
	 * Broken PA8800 firmware can report an lmmio range that
	 * collides with memory already in use; truncate it, or drop it
	 * entirely if it is wholly in use.
	 */
	if (truncate_pat_collision(&iomem_resource,
				   &(lba_dev->hba.lmmio_space))) {
		printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
				(long)lba_dev->hba.lmmio_space.start,
				(long)lba_dev->hba.lmmio_space.end);
		lba_dev->hba.lmmio_space.flags = 0;
	}

	pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
				HBA_PORT_BASE(lba_dev->hba.hba_num));
	if (lba_dev->hba.elmmio_space.flags)
		pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
					lba_dev->hba.lmmio_space_offset);
	if (lba_dev->hba.lmmio_space.flags)
		pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
					lba_dev->hba.lmmio_space_offset);
	if (lba_dev->hba.gmmio_space.flags) {
		/* Tracked, but deliberately not handed to the PCI core. */
	}

	pci_add_resource(&resources, &lba_dev->hba.bus_num);

	dev->dev.platform_data = lba_dev;
	lba_bus = lba_dev->hba.hba_bus =
		pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
				    cfg_ops, NULL, &resources);
	if (!lba_bus) {
		pci_free_resource_list(&resources);
		return 0;
	}

	max = pci_scan_child_bus(lba_bus);

	/* This is in lieu of calling pci_assign_unassigned_resources(). */
	if (is_pdc_pat()) {
		/* Assign resources to un-initialized devices. */

		DBG_PAT("LBA pci_bus_size_bridges()\n");
		pci_bus_size_bridges(lba_bus);

		DBG_PAT("LBA pci_bus_assign_resources()\n");
		pci_bus_assign_resources(lba_bus);

#ifdef DEBUG_LBA_PAT
		DBG_PAT("\nLBA PIOP resource tree\n");
		lba_dump_res(&lba_dev->hba.io_space, 2);
		DBG_PAT("\nLBA LMMIO resource tree\n");
		lba_dump_res(&lba_dev->hba.lmmio_space, 2);
#endif
	}

	/*
	 * Once the bus has been walked, restrict Elroy config-space
	 * access: early Elroy revisions go fatal on a master abort, and
	 * skipping probes of absent devices avoids exactly that.
	 */
	if (cfg_ops == &elroy_cfg_ops) {
		lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
	}

	lba_next_bus = max + 1;
	pci_bus_add_devices(lba_bus);

	/* Whew! Finally done! Tell services we got this one covered. */
	return 0;
}

static const struct parisc_device_id lba_tbl[] __initconst = {
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },
	{ 0, }
};

static struct parisc_driver lba_driver __refdata = {
	.name =		MODULE_NAME,
	.id_table =	lba_tbl,
	.probe =	lba_driver_probe,
};


/*
 * One-time initialization to let the world know an LBA was found.
 * Must be called exactly once before pci_init().
 */
void __init lba_init(void)
{
	register_parisc_driver(&lba_driver);
}
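
/*
 * register_parisc_driver() walks the firmware device tree and invokes
 * lba_driver_probe() once for each bridge matching lba_tbl above
 * (Elroy, Mercury or QuickSilver).
 */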


/*
 * Initialize the IBASE/IMASK registers for an LBA (Elroy).  Called by
 * the SBA code when it carves LMMIO space up among the ropes.
 */
void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
{
	void __iomem *base_addr = ioremap_nocache(lba->hpa.start, 4096);

	if (!base_addr)
		return;

	imask <<= 2;	/* adjust for hints - 2 registers per entry */

	/* Make sure we aren't trying to set bits that aren't writeable. */
	WARN_ON((ibase & 0x001fffff) != 0);
	WARN_ON((imask & 0x001fffff) != 0);

	DBG("%s() ibase 0x%x imask 0x%x\n", __func__, ibase, imask);
	WRITE_REG32(imask, base_addr + LBA_IMASK);
	WRITE_REG32(ibase, base_addr + LBA_IBASE);
	iounmap(base_addr);
}


/*
 * The Diva management card in rp3410/rp3440 machines has built-in
 * devices with no external connectors: an ATI graphics head and an
 * AUX serial port (which would otherwise occupy ttyS0).  Hide both by
 * zeroing the device ID so nothing binds to them.
 */
static void quirk_diva_ati_card(struct pci_dev *dev)
{
	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
	    dev->subsystem_device != 0x1292)
		return;

	dev_info(&dev->dev, "Hiding Diva built-in ATI card\n");
	dev->device = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
	quirk_diva_ati_card);

static void quirk_diva_aux_disable(struct pci_dev *dev)
{
	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
	    dev->subsystem_device != 0x1291)
		return;

	dev_info(&dev->dev, "Hiding Diva built-in AUX serial device\n");
	dev->device = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
	quirk_diva_aux_disable);