/*
 *	linux/arch/alpha/kernel/core_irongate.c
 *
 * Code common to all IRONGATE core logic chips.
 */
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_irongate.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/memblock.h>

#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif

igcsr32 *IronECC;
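/*
 * Given a bus, device/function number and register offset, compute the
 * matching address in the IRONGATE dense configuration space:
 *
 *	addr[23:16]	bus number
 *	addr[15:11]	device number
 *	addr[10: 8]	function number
 *	addr[ 7: 0]	register offset
 *
 * The result is offset into IRONGATE_CONF; accesses to a non-zero bus
 * are flagged as type 1 configuration cycles.
 */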
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
             unsigned long *pci_addr, unsigned char *type1)
{
        unsigned long addr;
        u8 bus = pbus->number;

        DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
                 "pci_addr=0x%p, type1=0x%p)\n",
                 bus, device_fn, where, pci_addr, type1));

        *type1 = (bus != 0);

        addr = (bus << 16) | (device_fn << 8) | where;
        addr |= IRONGATE_CONF;

        *pci_addr = addr;
        DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
        return 0;
}

static int
irongate_read_config(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 *value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        switch (size) {
        case 1:
                *value = __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                *value = __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *value = *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}

static int
irongate_write_config(struct pci_bus *bus, unsigned int devfn, int where,
                      int size, u32 value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        /* Each store is followed by a memory barrier and a read-back so
           the write is pushed out to the device before we return.  */
        switch (size) {
        case 1:
                __kernel_stb(value, *(vucp)addr);
                mb();
                __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                __kernel_stw(value, *(vusp)addr);
                mb();
                __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *(vuip)addr = value;
                mb();
                *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}

struct pci_ops irongate_pci_ops =
{
        .read = irongate_read_config,
        .write = irongate_write_config,
};
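/*
 * Clear latched error state in the Irongate status/command and ECC
 * registers, and pulse the legacy NMI clear bits, looping until the
 * ECC error bits (0x300) are no longer set.
 */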
int
irongate_pci_clr_err(void)
{
        unsigned int nmi_ctl = 0;
        unsigned int IRONGATE_jd;

again:
        IRONGATE_jd = IRONGATE0->stat_cmd;
        printk("Iron stat_cmd %x\n", IRONGATE_jd);
        IRONGATE0->stat_cmd = IRONGATE_jd;      /* writing back clears error bits */
        mb();
        IRONGATE_jd = IRONGATE0->stat_cmd;      /* re-read to force the write */

        IRONGATE_jd = *IronECC;
        printk("Iron ECC %x\n", IRONGATE_jd);
        *IronECC = IRONGATE_jd;                 /* writing back clears error bits */
        mb();
        IRONGATE_jd = *IronECC;                 /* re-read to force the write */

        /* Clear any latched NMI via system control port 0x61:
           set, then clear, the NMI-clear bits.  */
        nmi_ctl = inb(0x61);
        nmi_ctl |= 0x0c;
        outb(nmi_ctl, 0x61);
        nmi_ctl &= ~0x0c;
        outb(nmi_ctl, 0x61);

        IRONGATE_jd = *IronECC;
        if (IRONGATE_jd & 0x300)
                goto again;

        return 0;
}
#define IRONGATE_3GB 0xc0000000UL
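/*
 * On Albacore boards where RAM extends above 3GB we have to carve out a
 * memory region for PCI:  everything above 3GB is reserved here, and the
 * reservation is only temporary until PCI setup is done.
 */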
static void __init
albacore_init_arch(void)
{
        unsigned long memtop = max_low_pfn << PAGE_SHIFT;
        unsigned long pci_mem = (memtop + 0x1000000UL) & ~0xffffffUL;
        struct percpu_struct *cpu;
        int pal_rev, pal_var;

        cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
        pal_rev = cpu->pal_revision & 0xffff;
        pal_var = (cpu->pal_revision >> 16) & 0xff;
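        /* Warn if running on an SRM console older than A5.6-19
           (PALcode revision 0x13e, variation 2).  */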
        if (alpha_using_srm &&
            (pal_rev < 0x13e || (pal_rev == 0x13e && pal_var < 2)))
                printk(KERN_WARNING "WARNING! Upgrade to SRM A5.6-19 "
                                    "or later\n");

        if (pci_mem > IRONGATE_3GB)
                pci_mem = IRONGATE_3GB;
        IRONGATE0->pci_mem = pci_mem;
        alpha_mv.min_mem_address = pci_mem;
        if (memtop > pci_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
                extern unsigned long initrd_start, initrd_end;
                extern void *move_initrd(unsigned long);

                /* Move the initrd out of the region claimed for PCI.  */
                if (initrd_end && __pa(initrd_end) > pci_mem) {
                        unsigned long size;

                        size = initrd_end - initrd_start;
                        memblock_free(__pa(initrd_start), PAGE_ALIGN(size));
                        if (!move_initrd(pci_mem))
                                printk("irongate_init_arch: initrd too big "
                                       "(%ldK)\ndisabling initrd\n",
                                       size / 1024);
                }
#endif
                memblock_reserve(pci_mem, memtop - pci_mem);
                printk("irongate_init_arch: temporarily reserving "
                       "region %08lx-%08lx for PCI\n", pci_mem, memtop - 1);
        }
}
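/*
 * AGP is not used:  clear the low AGPVA control bits and report a zero
 * aperture size so the rest of this file treats the GART as disabled.
 */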
static void __init
irongate_setup_agp(void)
{
        IRONGATE0->agpva = IRONGATE0->agpva & ~0xf;
        alpha_agpgart_size = 0;
}
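/*
 * Main Irongate init:  distinguish the newer AMD 761 based systems from
 * the original 751 by device ID, pick the matching ECC status register,
 * clear pending errors, set up the PCI memory split on 761 systems, and
 * create the single PCI hose with direct-mapped (non-scatter-gather) DMA.
 */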
void __init
irongate_init_arch(void)
{
        struct pci_controller *hose;
        int amd761 = (IRONGATE0->dev_vendor >> 16) > 0x7006;

        IronECC = amd761 ? &IRONGATE0->bacsr54_eccms761 : &IRONGATE0->dramms;

        irongate_pci_clr_err();

        if (amd761)
                albacore_init_arch();

        irongate_setup_agp();

        /* Create our single hose.  */
        pci_isa_hose = hose = alloc_pci_controller();
        hose->io_space = &ioport_resource;
        hose->mem_space = &iomem_resource;
        hose->index = 0;

        /* Dense MEM/IO bases for the hose:  the low 40 bits of
           IRONGATE_MEM/IRONGATE_IO with bit 43 ORed in.  */
        hose->sparse_mem_base = 0;
        hose->sparse_io_base = 0;
        hose->dense_mem_base
          = (IRONGATE_MEM & 0xffffffffffUL) | 0x80000000000UL;
        hose->dense_io_base
          = (IRONGATE_IO & 0xffffffffffUL) | 0x80000000000UL;

        hose->sg_isa = hose->sg_pci = NULL;
        __direct_map_base = 0;
        __direct_map_size = 0xffffffff;
}
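/*
 * IO map support:  ioremap of addresses inside the AGP aperture must go
 * through the GART translation tables rather than the direct map.
 */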
#include <linux/vmalloc.h>
#include <linux/agp_backend.h>
#include <linux/agpgart.h>
#include <linux/export.h>
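/*
 * The GATT is a two-level table:  a page directory indexed in 4MB units,
 * each entry pointing to a page of entries that map 4KB pages.  These
 * helpers pull the directory index and the in-page offset out of an
 * aperture address.
 */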
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr))

#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)])
void __iomem *
irongate_ioremap(unsigned long addr, unsigned long size)
{
        struct vm_struct *area;
        unsigned long vaddr;
        unsigned long baddr, last;
        u32 *mmio_regs, *gatt_pages, *cur_gatt, pte;
        unsigned long gart_bus_addr;

        if (!alpha_agpgart_size)
                return (void __iomem *)(addr + IRONGATE_MEM);

        gart_bus_addr = (unsigned long)IRONGATE0->bar0 &
                        PCI_BASE_ADDRESS_MEM_MASK;

        /*
         * Check whether the region lies within the AGP aperture.
         */
        do {
                if (addr >= gart_bus_addr && addr + size - 1 <
                    gart_bus_addr + alpha_agpgart_size)
                        break;

                /* Not in the aperture:  fall back to the direct map.  */
                return (void __iomem *)(addr + IRONGATE_MEM);
        } while (0);

        mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 &
                             PCI_BASE_ADDRESS_MEM_MASK) + IRONGATE_MEM);

        gatt_pages = (u32 *)(phys_to_virt(mmio_regs[1]));

        /*
         * Sanity check:  the mapping must start on a page boundary.
         */
        if (addr & ~PAGE_MASK) {
                printk("AGP ioremap failed... addr not page aligned (0x%lx)\n",
                       addr);
                return (void __iomem *)(addr + IRONGATE_MEM);
        }
        last = addr + size - 1;
        size = PAGE_ALIGN(last) - addr;

#if 0
        printk("irongate_ioremap(0x%lx, 0x%lx)\n", addr, size);
        printk("irongate_ioremap:  gart_bus_addr  0x%lx\n", gart_bus_addr);
        printk("irongate_ioremap:  gart_aper_size 0x%lx\n", alpha_agpgart_size);
        printk("irongate_ioremap:  mmio_regs      %p\n", mmio_regs);
        printk("irongate_ioremap:  gatt_pages     %p\n", gatt_pages);

        for (baddr = addr; baddr <= last; baddr += PAGE_SIZE)
        {
                cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
                pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
                printk("irongate_ioremap:  cur_gatt %p pte 0x%x\n",
                       cur_gatt, pte);
        }
#endif

        /*
         * Map it:  grab a vmalloc area and wire each aperture page to
         * the physical page named by its GATT entry.
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;

        for (baddr = addr, vaddr = (unsigned long)area->addr;
             baddr <= last;
             baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
        {
                cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
                pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;

                if (__alpha_remap_area_pages(vaddr,
                                             pte, PAGE_SIZE, 0)) {
                        printk("AGP ioremap: FAILED to map...\n");
                        vfree(area->addr);
                        return NULL;
                }
        }

        flush_tlb_all();

        vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
#if 0
        printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n",
               addr, size, vaddr);
#endif
        return (void __iomem *)vaddr;
}
EXPORT_SYMBOL(irongate_ioremap);
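/*
 * Only mappings that went through the GATT above live in vmalloc space;
 * addresses from the direct KSEG map need no teardown.
 */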
void
irongate_iounmap(volatile void __iomem *xaddr)
{
        unsigned long addr = (unsigned long) xaddr;
        if (((long)addr >> 41) == -2)
                return;
        if (addr)
                return vfree((void *)(PAGE_MASK & addr));
}
EXPORT_SYMBOL(irongate_iounmap);