1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/slab.h>
21#include <linux/mm.h>
22#include <linux/platform_device.h>
23#include <asm/hardware.h>
24#include <asm/io.h>
25#include <asm/mmzone.h>
26#include <asm/pdc.h>
27#include <asm/pdcpat.h>
28#include <asm/processor.h>
29#include <asm/page.h>
30#include <asm/parisc-device.h>
31#include <asm/tlbflush.h>
32
33
34
35
36
#undef DEBUG_PAT

/* Firmware interface flavour detected at boot; set exactly once by
 * setup_pdc() and read-only afterwards. */
int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL;

/* PAT firmware only (see setup_pdc()): the cell number/location we are
 * running on, and the PDC capability word reported by
 * pdc_pat_pd_get_pdc_revisions() (tested with the
 * PDC_PAT_CAPABILITY_BIT_* flags). */
unsigned long parisc_cell_num __ro_after_init;
unsigned long parisc_cell_loc __ro_after_init;
unsigned long parisc_pat_pdc_cap __ro_after_init;
45
46
/*
 * setup_pdc - determine which flavour of PDC firmware this machine has.
 *
 * Probes the firmware interfaces in order: PDC_SYSTEM_MAP first, then
 * 64-bit PAT PDC (CONFIG_64BIT only), and finally classifies old
 * "Snake" firmware by the bus id encoded in the hversion word returned
 * by PDC_MODEL.  The result is recorded in the global pdc_type; an
 * unrecognized machine causes a panic.
 */
void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef CONFIG_64BIT
	struct pdc_pat_cell_num cell_info;
#endif

	printk(KERN_INFO "Determining PDC firmware type: ");

	/* First choice: the System Map interface. */
	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		pr_cont("System Map.\n");
		return;
	}

#ifdef CONFIG_64BIT
	/* 64-bit PAT firmware: also record the cell we run on and the
	 * PDC capability bits for later use. */
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		unsigned long legacy_rev, pat_rev;
		pdc_type = PDC_TYPE_PAT;
		pr_cont("64 bit PAT.\n");
		parisc_cell_num = cell_info.cell_num;
		parisc_cell_loc = cell_info.cell_loc;
		pr_info("PAT: Running on cell %lu and location %lu.\n",
			parisc_cell_num, parisc_cell_loc);
		status = pdc_pat_pd_get_pdc_revisions(&legacy_rev,
			&pat_rev, &parisc_pat_pdc_cap);
		pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n",
			legacy_rev, pat_rev, parisc_pat_pdc_cap,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ ? 1:0);
		return;
	}
#endif

	/* Last resort: look at the bus id field of the hversion word
	 * (bits [15:11], i.e. shift 11, mask 0x1f) from PDC_MODEL. */
	status = pdc_model_info(&model);

	bus_id = (model.hversion >> (4 + 7)) & 0x1f;

	switch (bus_id) {
	case 0x4:
	case 0x6:
	case 0x7:
	case 0x8:
	case 0xA:
	case 0xC:
		/* All of the above bus ids identify Snake-class firmware. */
		pdc_type = PDC_TYPE_SNAKE;
		pr_cont("Snake.\n");
		return;

	default:
		/* Unknown bus id: we cannot drive this firmware. */
		pr_cont("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}
126
127#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12)
128
129static void __init
130set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
131 unsigned long pages4k)
132{
133
134
135
136
137
138
139
140 if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
141 || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {
142
143 panic("Memory range doesn't align with page size!\n");
144 }
145
146 pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
147 pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
148}
149
150static void __init pagezero_memconfig(void)
151{
152 unsigned long npages;
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167 npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
168 set_pmem_entry(pmem_ranges,0UL,npages);
169 npmem_ranges = 1;
170}
171
172#ifdef CONFIG_64BIT
173
174
175
176
177
178
179
180
181
182
183
184
185
186
/*
 * pat_query_module - query PAT firmware about one module and register it.
 *
 * Calls PDC_PAT_CELL_MODULE for (@pcell_loc, @mod_index), allocates a
 * parisc_device for the reported module, copies the firmware-supplied
 * module information into it and registers it.
 *
 * Returns PDC_OK on success — and also when no device could be
 * allocated, so that the caller (pat_inventory) keeps iterating over
 * the remaining indices — or the firmware error status when the module
 * query fails (e.g. no more modules).
 */
static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
	unsigned long bytecnt;
	unsigned long temp;
	long status;
	struct parisc_device *dev;

	/* The maddr block is heap-allocated rather than a local. */
	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
	if (!pa_pdc_cell)
		panic("couldn't allocate memory for PDC_PAT_CELL!");

	/* Ask for the module as seen from the processor (PA_VIEW). */
	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				     PA_VIEW, pa_pdc_cell);

	if (status != PDC_OK) {
		/* Module query failed — propagate the firmware status. */
		kfree(pa_pdc_cell);
		return status;
	}

	temp = pa_pdc_cell->cba;
	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
	if (!dev) {
		/* Could not allocate a device; report OK so the caller
		 * continues with the next module index. */
		kfree(pa_pdc_cell);
		return PDC_OK;
	}

	/* Remember where this module came from so it can be re-queried. */
	dev->pcell_loc = pcell_loc;
	dev->mod_index = mod_index;

	/* Stash the generic module info returned by the firmware. */
	dev->mod_info = pa_pdc_cell->mod_info;
	dev->pmod_loc = pa_pdc_cell->mod_location;
	dev->mod0 = pa_pdc_cell->mod[0];

	register_parisc_device(dev);

#ifdef DEBUG_PAT
	/* Debug dump of what the firmware told us.  Note the (legal but
	 * unusual) declarations at the top of the switch body: they are
	 * in scope for all cases but never initialized by fallthrough. */
	switch (PAT_GET_ENTITY(dev->mod_info)) {
		pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
		unsigned long i;

	case PAT_ENTITY_PROC:
		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
			pa_pdc_cell->mod[0]);
		break;

	case PAT_ENTITY_MEM:
		printk(KERN_DEBUG
			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
			pa_pdc_cell->mod[2]);
		break;
	case PAT_ENTITY_CA:
		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
		break;

	case PAT_ENTITY_PBC:
		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
		goto print_ranges;

	case PAT_ENTITY_SBA:
		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
		goto print_ranges;

	case PAT_ENTITY_LBA:
		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");

	print_ranges:
		/* Re-query the same module from the I/O side so both
		 * views of its address ranges can be printed. */
		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				    IO_VIEW, &io_pdc_cell);
		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
			printk(KERN_DEBUG
				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, pa_pdc_cell->mod[2 + i * 3],
				pa_pdc_cell->mod[3 + i * 3],
				pa_pdc_cell->mod[4 + i * 3]);
			printk(KERN_DEBUG
				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, io_pdc_cell.mod[2 + i * 3],
				io_pdc_cell.mod[3 + i * 3],
				io_pdc_cell.mod[4 + i * 3]);
		}
		printk(KERN_DEBUG "\n");
		break;
	}
#endif /* DEBUG_PAT */

	kfree(pa_pdc_cell);

	return PDC_OK;
}
292
293
294
295
296
297
298
299
300
/* PAT firmware may report more ranges than pmem_ranges[] can hold;
 * fetch up to this many entries and filter below. */
#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)

/*
 * pat_memconfig - build pmem_ranges[] from the PAT PD address map.
 *
 * Reads the address map via PDC_PAT_PD, then copies only entries that
 * describe usable general memory (PAT_MEMORY_DESCRIPTOR of type
 * PAT_MEMTYPE_MEMORY, non-empty, with GENERAL/GI/GNI usage) into the
 * firmware-independent pmem_ranges[] table, capped at
 * MAX_PHYSMEM_RANGES.  Falls back to pagezero_memconfig() when the
 * firmware call fails or returns an inconsistently-sized table.
 */
static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The table can't be trusted (call failed, or its length
		 * is not a whole number of entries); fall back to the
		 * single range described in page zero. */
		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy usable general-memory entries into the firmware-independent
	 * pmem_ranges[] table, skipping everything else. */
	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		if ( (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		     || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		     || (mtbl_ptr->pages == 0)
		     || ( (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			  && (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			  && (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {
			/* Not general-purpose memory — skip. */
			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}
368
369static int __init pat_inventory(void)
370{
371 int status;
372 ulong mod_index = 0;
373 struct pdc_pat_cell_num cell_info;
374
375
376
377
378
379 status = pdc_pat_cell_get_number(&cell_info);
380 if (status != PDC_OK) {
381 return 0;
382 }
383
384#ifdef DEBUG_PAT
385 printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
386 cell_info.cell_loc);
387#endif
388
389 while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
390 mod_index++;
391 }
392
393 return mod_index;
394}
395
396
/*
 * sprockets_memconfig - build pmem_ranges[] via the PDC memory table
 * call.  Falls back to the page-zero single-range configuration when
 * the call fails (e.g. firmware that does not implement it).
 */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr,mem_table,
				(unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {
		/* Firmware call failed or is unsupported — use the
		 * page-zero fallback instead. */
		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	/* NOTE(review): entries_returned is used unchecked against the
	 * mem_table size — presumably the firmware clamps it to the
	 * MAX_PHYSMEM_RANGES limit passed above; confirm. */
	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}
437
#else	/* !CONFIG_64BIT */

/* PAT support is compiled out on 32-bit kernels: the PAT inventory and
 * memory-config paths become no-ops, and the System Map memory path
 * falls back to the page-zero configuration. */
#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()

#endif	/* !CONFIG_64BIT */
445
446
447#ifndef CONFIG_PA20
448
449
450
451static struct parisc_device * __init
452legacy_create_device(struct pdc_memory_map *r_addr,
453 struct pdc_module_path *module_path)
454{
455 struct parisc_device *dev;
456 int status = pdc_mem_map_hpa(r_addr, module_path);
457 if (status != PDC_OK)
458 return NULL;
459
460 dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
461 if (dev == NULL)
462 return NULL;
463
464 register_parisc_device(dev);
465 return dev;
466}
467
468
469
470
471
472
473
474
475
476
/*
 * snake_inventory - device discovery on old (Snake-class) firmware.
 *
 * Probes the 16 possible top-level module slots via PDC_MEM_MAP.  For
 * every slot that turns out to be a bus adapter (HPHW_BA), it then
 * probes the 16 possible function slots beneath it.
 */
static void __init snake_inventory(void)
{
	int mod;
	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		/* Top-level probe: all bus-converter bytes wildcarded
		 * (0xff), module number set. */
		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		/* Found a bus adapter: probe the devices below it by
		 * fixing bc[4] to the adapter's module number. */
		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}
502
#else	/* CONFIG_PA20 */
/* Snake probing is only built when CONFIG_PA20 is not set; stub it out
 * otherwise. */
#define snake_inventory() do { } while (0)
#endif	/* CONFIG_PA20 */
506
507
508
509
510
511
512
513
514
515
516
517
/*
 * add_system_map_addresses - attach a module's extra address ranges.
 *
 * @dev:             device the addresses belong to
 * @num_addrs:       number of additional addresses the firmware
 *                   advertised for this module
 * @module_instance: module index passed back to PDC_FIND_ADDRESS
 *
 * Allocates dev->addr and fills it with every address the firmware
 * returns successfully; failed lookups are logged and skipped, so
 * dev->num_addrs may end up smaller than @num_addrs.
 *
 * NOTE(review): dev->num_addrs is used as the running fill index and is
 * assumed to start at 0 — presumably zeroed by alloc_pa_dev(); confirm.
 */
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
			 int module_instance)
{
	int i;
	long status;
	struct pdc_system_map_addr_info addr_result;

	dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
	if(!dev->addr) {
		printk(KERN_ERR "%s %s(): memory allocation failure\n",
		       __FILE__, __func__);
		return;
	}

	/* Address indices are 1-based in the firmware call. */
	for(i = 1; i <= num_addrs; ++i) {
		status = pdc_system_map_find_addrs(&addr_result,
						   module_instance, i);
		if(PDC_OK == status) {
			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
			dev->num_addrs++;
		} else {
			printk(KERN_WARNING
			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
			       status, i);
		}
	}
}
546
547
548
549
550
551
552
553static void __init system_map_inventory(void)
554{
555 int i;
556 long status = PDC_OK;
557
558 for (i = 0; i < 256; i++) {
559 struct parisc_device *dev;
560 struct pdc_system_map_mod_info module_result;
561 struct pdc_module_path module_path;
562
563 status = pdc_system_map_find_mods(&module_result,
564 &module_path, i);
565 if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
566 break;
567 if (status != PDC_OK)
568 continue;
569
570 dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
571 if (!dev)
572 continue;
573
574 register_parisc_device(dev);
575
576
577 if (!module_result.add_addrs)
578 continue;
579
580 add_system_map_addresses(dev, module_result.add_addrs, i);
581 }
582
583 walk_central_bus();
584 return;
585}
586
/*
 * do_memory_inventory - discover physical memory ranges.
 *
 * Dispatches on the firmware type detected by setup_pdc().  If the
 * firmware-specific method produced no ranges, or a first range that
 * does not start at pfn 0, the result is distrusted and replaced by the
 * page-zero fallback.  The Snake case returns early because it already
 * uses pagezero_memconfig() directly.
 */
void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

	/* Sanity check: need at least one range, starting at pfn 0. */
	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}
613
/*
 * do_device_inventory - enumerate and register all devices.
 *
 * Initializes the parisc bus, runs the firmware-specific inventory
 * routine selected by pdc_type, prints the discovered devices, enables
 * PxTLB-flush serialization when a Merced bus is present (64-bit SMP
 * builds only) and, when running under qemu, registers the fw_cfg
 * platform device described by page zero.
 */
void __init do_device_inventory(void)
{
	printk(KERN_INFO "Searching for devices...\n");

	init_parisc_bus();

	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_inventory();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		system_map_inventory();
		break;

	case PDC_TYPE_SNAKE:
		snake_inventory();
		break;

	default:
		panic("Unknown PDC type!\n");
	}
	printk(KERN_INFO "Found devices:\n");
	print_parisc_devices();

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	pa_serialize_tlb_flushes = machine_has_merced_bus();
	if (pa_serialize_tlb_flushes)
		pr_info("Merced bus found: Enable PxTLB serialization.\n");
#endif

#if defined(CONFIG_FW_CFG_SYSFS)
	if (running_on_qemu) {
		struct resource res[3] = {0,};
		unsigned int base;

		/* The fw_cfg base address is assembled from two 32-bit
		 * words stored in page zero.
		 * NOTE(review): the 64-bit value is stored into an
		 * unsigned int, discarding bits above 32 — presumably
		 * the fw_cfg port always lies in the low 4 GB; confirm. */
		base = ((unsigned long long) PAGE0->pad0[2] << 32)
			| PAGE0->pad0[3];

		res[0].name = "fw_cfg";
		res[0].start = base;
		res[0].end = base + 8 - 1;
		res[0].flags = IORESOURCE_MEM;

		res[1].name = "ctrl";
		res[1].start = 0;
		res[1].flags = IORESOURCE_REG;

		res[2].name = "data";
		res[2].start = 4;
		res[2].flags = IORESOURCE_REG;

		/* base == 0 means SeaBIOS did not expose an interface. */
		if (base) {
			pr_info("Found qemu fw_cfg interface at %#08x\n", base);
			platform_device_register_simple("fw_cfg",
					PLATFORM_DEVID_NONE, res, 3);
		}
	}
#endif
}
675