/*
 *	linux/arch/alpha/kernel/core_wildfire.c
 *
 *	Wildfire support.
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_wildfire.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/ptrace.h>
#include <asm/smp.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_CONFIG 0
#define DEBUG_DUMP_REGS 0
#define DEBUG_DUMP_CONFIG 1

#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif

#if DEBUG_DUMP_REGS
static void wildfire_dump_pci_regs(int qbbno, int hoseno);
static void wildfire_dump_pca_regs(int qbbno, int pcano);
static void wildfire_dump_qsa_regs(int qbbno);
static void wildfire_dump_qsd_regs(int qbbno);
static void wildfire_dump_iop_regs(int qbbno);
static void wildfire_dump_gp_regs(int qbbno);
#endif
#if DEBUG_DUMP_CONFIG
static void wildfire_dump_hardware_config(void);
#endif

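/*
 * System topology discovered by wildfire_hardware_probe().  The QBB maps
 * translate between hardware and software QBB numbers; the masks record,
 * one bit per unit, which QBBs, global ports, IOPs, PCAs, CPUs and memory
 * arrays were found present.
 */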
unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
#define QBB_MAP_EMPTY 0xff

unsigned long wildfire_hard_qbb_mask;
unsigned long wildfire_soft_qbb_mask;
unsigned long wildfire_gp_mask;
unsigned long wildfire_hs_mask;
unsigned long wildfire_iop_mask;
unsigned long wildfire_ior_mask;
unsigned long wildfire_pca_mask;
unsigned long wildfire_cpu_mask;
unsigned long wildfire_mem_mask;

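/*
 * Set up one PCI hose: allocate its pci_controller, claim its I/O and
 * memory resources, and program the four PCI DMA windows (see the window
 * layout comment below).
 */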
void __init
wildfire_init_hose(int qbbno, int hoseno)
{
	struct pci_controller *hose;
	wildfire_pci *pci;

	hose = alloc_pci_controller();
	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/* Sparse space is not used on Wildfire; only dense mappings. */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno);
	hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno);

	hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno);
	hose->index = (qbbno << 3) + hoseno;

	hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS;
	hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1;
	hose->io_space->name = pci_io_names[hoseno];
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno) - WILDFIRE_MEM_BIAS;
	hose->mem_space->end = hose->mem_space->start + 0xffffffff;
	hose->mem_space->name = pci_mem_names[hoseno];
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n",
			qbbno, hoseno);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n",
			qbbno, hoseno);

#if DEBUG_DUMP_REGS
	wildfire_dump_pci_regs(qbbno, hoseno);
#endif

	/*
	 * Set up the PCI-to-physical-memory translation windows.
	 *
	 * Window 0 is scatter-gather, 8MB at 8MB (for ISA).
	 * Window 1 is direct access, 1GB at 1GB.
	 * Window 2 is direct access, 1GB at 2GB.
	 * Window 3 is scatter-gather, 128MB at 3GB.
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
					SMP_CACHE_BYTES);
	hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000,
					SMP_CACHE_BYTES);

	pci = WILDFIRE_pci(qbbno, hoseno);

	pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3;
	pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000;
	pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);

	pci->pci_window[1].wbase.csr = 0x40000000 | 1;
	pci->pci_window[1].wmask.csr = (0x40000000 - 1) & 0xfff00000;
	pci->pci_window[1].tbase.csr = 0;

	pci->pci_window[2].wbase.csr = 0x80000000 | 1;
	pci->pci_window[2].wmask.csr = (0x40000000 - 1) & 0xfff00000;
	pci->pci_window[2].tbase.csr = 0x40000000;

	pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3;
	pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000;
	pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);

	wildfire_pci_tbi(hose, 0, 0);
}

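/*
 * Initialize one PCA (PCI adapter) of a QBB, if it is present.  Each PCA
 * feeds two PCI hoses.
 */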
void __init
wildfire_init_pca(int qbbno, int pcano)
{
	if (!WILDFIRE_PCA_EXISTS(qbbno, pcano))
		return;

#if DEBUG_DUMP_REGS
	wildfire_dump_pca_regs(qbbno, pcano);
#endif

	wildfire_init_hose(qbbno, (pcano << 1) + 0);
	wildfire_init_hose(qbbno, (pcano << 1) + 1);
}

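/*
 * Initialize one QBB (Quad Building Block): skip it if absent, otherwise
 * bring up each of its PCAs.
 */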
void __init
wildfire_init_qbb(int qbbno)
{
	int pcano;

	if (!WILDFIRE_QBB_EXISTS(qbbno))
		return;

#if DEBUG_DUMP_REGS
	wildfire_dump_qsa_regs(qbbno);
	wildfire_dump_qsd_regs(qbbno);
	wildfire_dump_iop_regs(qbbno);
	wildfire_dump_gp_regs(qbbno);
#endif

	for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
		wildfire_init_pca(qbbno, pcano);
	}
}

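/*
 * Probe the hardware configuration.  The WHAMI, QBB_ID, GPA_QBB_MAP and
 * QBB_POP CSRs are read to discover which QBBs exist, how hard and soft
 * QBB numbers map onto each other, and which CPUs, memory arrays, IOPs
 * and PCAs are populated.  The results land in the masks and maps above.
 */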
void __init
wildfire_hardware_probe(void)
{
	unsigned long temp;
	unsigned int hard_qbb, soft_qbb;
	wildfire_fast_qsd *fast = WILDFIRE_fast_qsd();
	wildfire_qsd *qsd;
	wildfire_qsa *qsa;
	wildfire_iop *iop;
	wildfire_gp *gp;
	wildfire_ne *ne;
	wildfire_fe *fe;
	int i;

	temp = fast->qsd_whami.csr;
#if 0
	printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp);
#endif

	hard_qbb = (temp >> 8) & 7;
	soft_qbb = (temp >> 4) & 7;

	/* Start from the QBB we are running on. */
	wildfire_hard_qbb_mask = (1 << hard_qbb);
	wildfire_soft_qbb_mask = (1 << soft_qbb);

	wildfire_gp_mask = 0;
	wildfire_hs_mask = 0;
	wildfire_iop_mask = 0;
	wildfire_ior_mask = 0;
	wildfire_pca_mask = 0;

	wildfire_cpu_mask = 0;
	wildfire_mem_mask = 0;

	memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
	memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);

	/* First, determine which QBBs are present. */
	qsa = WILDFIRE_qsa(soft_qbb);

	temp = qsa->qsa_qbb_id.csr;
#if 0
	printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp);
#endif

	if (temp & 0x40)
		wildfire_hs_mask = 1;

	if (temp & 0x20) {
		/* A global port is present; read its QBB map to find
		   the other QBBs. */
		gp = WILDFIRE_gp(soft_qbb);
		temp = 0;
		for (i = 0; i < 4; i++) {
			temp |= gp->gpa_qbb_map[i].csr << (i * 8);
#if 0
			printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n",
				i, gp, temp);
#endif
		}

		for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) {
			if (temp & 8) {
				soft_qbb = temp & 7;
				wildfire_hard_qbb_mask |= (1 << hard_qbb);
				wildfire_soft_qbb_mask |= (1 << soft_qbb);
			}
			temp >>= 4;
		}
		wildfire_gp_mask = wildfire_soft_qbb_mask;
	}

	/* Next, determine each QBB's resources. */
	for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) {
		if (!WILDFIRE_QBB_EXISTS(soft_qbb))
			continue;

		qsd = WILDFIRE_qsd(soft_qbb);
		temp = qsd->qsd_whami.csr;
#if 0
		printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp);
#endif
		hard_qbb = (temp >> 8) & 7;
		wildfire_hard_qbb_map[hard_qbb] = soft_qbb;
		wildfire_soft_qbb_map[soft_qbb] = hard_qbb;

		qsa = WILDFIRE_qsa(soft_qbb);
		temp = qsa->qsa_qbb_pop[0].csr;
#if 0
		printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp);
#endif
		wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2);
		wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

		temp = qsa->qsa_qbb_pop[1].csr;
#if 0
		printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp);
#endif
		wildfire_iop_mask |= (1 << soft_qbb);
		wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

		temp = qsa->qsa_qbb_id.csr;
#if 0
		printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp);
#endif
		if (temp & 0x20)
			wildfire_gp_mask |= (1 << soft_qbb);

		/* Probe for PCA existence on this QBB. */
		for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) {
			iop = WILDFIRE_iop(soft_qbb);
			ne = WILDFIRE_ne(soft_qbb, i);
			fe = WILDFIRE_fe(soft_qbb, i);

			if ((iop->iop_hose[i].init.csr & 1) == 1 &&
			    ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) &&
			    ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL))
				wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i);
		}
	}

#if DEBUG_DUMP_CONFIG
	wildfire_dump_hardware_config();
#endif
}

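/*
 * Top-level chipset init: widen the ioport resource, probe the topology,
 * initialize every discovered QBB, and set up the direct-mapped DMA range.
 */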
void __init
wildfire_init_arch(void)
{
	int qbbno;

	/* With multiple hoses, I/O addresses extend well beyond the
	   default resource range; open up the ioport resource. */
	ioport_resource.end = ~0UL;

	/* Probe the hardware for configuration information. */
	wildfire_hardware_probe();

	/* Initialize each QBB that was found. */
	for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
		wildfire_init_qbb(qbbno);
	}

	/* Direct-mapped DMA: a 2GB window at bus address 1GB, matching
	   windows 1 and 2 programmed in wildfire_init_hose(). */
	__direct_map_base = 0x40000000UL;
	__direct_map_size = 0x80000000UL;
}

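/*
 * Machine check handler: drain outstanding transactions, clear the
 * machine-check state, and hand the logout frame to the generic code.
 */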
void
wildfire_machine_check(unsigned long vector, unsigned long la_ptr)
{
	mb();
	mb();
	draina();

	/* Clear the pending machine check and correctable-error state. */
	wrmces(0x7);
	mb();

	process_mcheck_info(vector, la_ptr, "WILDFIRE",
			    mcheck_expected(smp_processor_id()));
}

void
wildfire_kill_arch(int mode)
{
}

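/*
 * Scatter-gather TLB invalidate.  The start/end arguments are ignored;
 * a read of the hose's PCI_FLUSH_TLB CSR flushes the whole TLB.
 */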
void
wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	int qbbno = hose->index >> 3;
	int hoseno = hose->index & 7;
	wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);

	mb();
	pci->pci_flush_tlb.csr; /* reading does the trick */
}

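/*
 * PCI configuration space support.  mk_conf_addr() builds the dense
 * config-space address for (bus, devfn, where) under this hose and flags
 * any bus other than the hose's root bus as needing a type 1 cycle.
 */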
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	struct pci_controller *hose = pbus->sysdata;
	unsigned long addr;
	u8 bus = pbus->number;

	DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
		 "pci_addr=0x%p, type1=0x%p)\n",
		 bus, device_fn, where, pci_addr, type1));

	if (!pbus->parent) /* No parent means peer PCI bus. */
		bus = 0;
	*type1 = (bus != 0);

	addr = (bus << 16) | (device_fn << 8) | where;
	addr |= hose->config_space_base;

	*pci_addr = addr;
	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}

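/*
 * Config reads are plain byte/word/longword loads through the hose's
 * dense config-space mapping computed above.
 */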
static int
wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

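/*
 * Config writes are followed by a memory barrier and a read-back of the
 * same location, forcing the posted write out to the device.
 */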
static int
wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		      int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops wildfire_pci_ops =
{
	.read =		wildfire_read_config,
	.write =	wildfire_write_config,
};

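/*
 * NUMA helpers.  Wildfire physical addresses carry the QBB number in
 * bits 36 and up, so each node owns a 64GB slice of the address space
 * and holds up to four CPUs.
 */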
int wildfire_pa_to_nid(unsigned long pa)
{
	/* The QBB number sits in physical address bits 36 and up. */
	return pa >> 36;
}

int wildfire_cpuid_to_nid(int cpuid)
{
	/* Four CPUs per QBB. */
	return cpuid >> 2;
}

unsigned long wildfire_node_mem_start(int nid)
{
	/* Each QBB node starts on a 64GB boundary. */
	return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
}

unsigned long wildfire_node_mem_size(int nid)
{
	/* At most 64GB of memory per node. */
	return 64UL * 1024 * 1024 * 1024;
}

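/*
 * Optional register dumps, controlled by DEBUG_DUMP_REGS and
 * DEBUG_DUMP_CONFIG above.  These routines only read and print CSRs.
 */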
#if DEBUG_DUMP_REGS

static void __init
wildfire_dump_pci_regs(int qbbno, int hoseno)
{
	wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
	int i;

	printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
		qbbno, hoseno, pci);

	printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
		pci->pci_io_addr_ext.csr);
	printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr);
	printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr);
	printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr);
	printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr);
	printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr);
	printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr);

	printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n",
		qbbno, hoseno, pci);
	for (i = 0; i < 4; i++) {
		printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i,
			pci->pci_window[i].wbase.csr,
			pci->pci_window[i].wmask.csr,
			pci->pci_window[i].tbase.csr);
	}
	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_pca_regs(int qbbno, int pcano)
{
	wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano);
	int i;

	printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n",
		qbbno, pcano, pca);

	printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr);
	printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr);
	printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr);
	printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr);
	printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n",
		pca->pca_stdio_edge_level.csr);

	printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n",
		qbbno, pcano, pca);
	for (i = 0; i < 4; i++) {
		printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i,
			pca->pca_int[i].target.csr,
			pca->pca_int[i].enable.csr);
	}

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_qsa_regs(int qbbno)
{
	wildfire_qsa *qsa = WILDFIRE_qsa(qbbno);
	int i;

	printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa);

	printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr);
	printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr);
	printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr);

	for (i = 0; i < 5; i++)
		printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n",
			i, qsa->qsa_config[i].csr);

	for (i = 0; i < 2; i++)
		printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n",
			i, qsa->qsa_qbb_pop[i].csr);

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_qsd_regs(int qbbno)
{
	wildfire_qsd *qsd = WILDFIRE_qsd(qbbno);

	printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd);

	printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr);
	printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr);
	printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n",
		qsd->qsd_port_present.csr);
	printk(KERN_ERR " QSD_PORT_ACTIVE: 0x%16lx\n",
		qsd->qsd_port_active.csr);
	printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n",
		qsd->qsd_fault_ena.csr);
	printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n",
		qsd->qsd_cpu_int_ena.csr);
	printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n",
		qsd->qsd_mem_config.csr);
	printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n",
		qsd->qsd_err_sum.csr);

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_iop_regs(int qbbno)
{
	wildfire_iop *iop = WILDFIRE_iop(qbbno);
	int i;

	printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);

	printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr);
	printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr);
	printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n",
		iop->iop_switch_credits.csr);
	printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n",
		iop->iop_hose_credits.csr);

	for (i = 0; i < 4; i++)
		printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n",
			i, iop->iop_hose[i].init.csr);
	for (i = 0; i < 4; i++)
		printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n",
			i, iop->iop_dev_int[i].target.csr);

	printk(KERN_ERR "\n");
}

static void __init
wildfire_dump_gp_regs(int qbbno)
{
	wildfire_gp *gp = WILDFIRE_gp(qbbno);
	int i;

	printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp);
	for (i = 0; i < 4; i++)
		printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n",
			i, gp->gpa_qbb_map[i].csr);

	printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n",
		gp->gpa_mem_pop_map.csr);
	printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr);
	printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr);
	printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr);
	printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr);
	printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr);

	printk(KERN_ERR "\n");
}
#endif

#if DEBUG_DUMP_CONFIG
static void __init
wildfire_dump_hardware_config(void)
{
	int i;

	printk(KERN_ERR "Probed Hardware Configuration\n");

	printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask);
	printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask);

	printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask);
	printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask);
	printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask);
	printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask);
	printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask);

	printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask);
	printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask);

	printk(" hard_qbb_map: ");
	for (i = 0; i < WILDFIRE_MAX_QBB; i++)
		if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY)
			printk("--- ");
		else
			printk("%3d ", wildfire_hard_qbb_map[i]);
	printk("\n");

	printk(" soft_qbb_map: ");
	for (i = 0; i < WILDFIRE_MAX_QBB; i++)
		if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY)
			printk("--- ");
		else
			printk("%3d ", wildfire_soft_qbb_map[i]);
	printk("\n");
}
#endif