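/*
 * Code common to all Marvel-based Alpha systems (EV7 processors
 * with IO7 I/O ASICs).
 */
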
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_marvel.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <linux/module.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/smp.h>
#include <asm/gct.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/rtc.h>
#include <asm/vga.h>

#include "proto.h"
#include "pci_impl.h"

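/*
 * Debug helpers for config space accesses.
 */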
#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif

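/*
 * Private data.
 */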
static struct io7 *io7_head = NULL;

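/*
 * Helper functions.
 */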
static unsigned long __attribute__ ((unused))
read_ev7_csr(int pe, unsigned long offset)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);
	unsigned long q;

	mb();
	q = ev7csr->csr;
	mb();

	return q;
}

static void __attribute__ ((unused))
write_ev7_csr(int pe, unsigned long offset, unsigned long q)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);

	mb();
	ev7csr->csr = q;
	mb();
}

static char * __init
mk_resource_name(int pe, int port, char *str)
{
	char tmp[80];
	char *name;

	sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
	name = alloc_bootmem(strlen(tmp) + 1);
	strcpy(name, tmp);

	return name;
}

inline struct io7 *
marvel_next_io7(struct io7 *prev)
{
	return (prev ? prev->next : io7_head);
}

struct io7 *
marvel_find_io7(int pe)
{
	struct io7 *io7;

	for (io7 = io7_head; io7 && io7->pe != pe; io7 = io7->next)
		continue;

	return io7;
}

static struct io7 * __init
alloc_io7(unsigned int pe)
{
	struct io7 *io7;
	struct io7 *insp;
	int h;

	if (marvel_find_io7(pe)) {
		printk(KERN_WARNING "IO7 at PE %d already allocated!\n", pe);
		return NULL;
	}

	io7 = alloc_bootmem(sizeof(*io7));
	io7->pe = pe;
	spin_lock_init(&io7->irq_lock);

	for (h = 0; h < 4; h++) {
		io7->ports[h].io7 = io7;
		io7->ports[h].port = h;
		io7->ports[h].enabled = 0;
	}

	/*
	 * Insert the new io7 into the list, sorted by PE.
	 */
	if (NULL == io7_head)
		io7_head = io7;
	else if (io7_head->pe > io7->pe) {
		io7->next = io7_head;
		io7_head = io7;
	} else {
		for (insp = io7_head; insp; insp = insp->next) {
			if (insp->pe == io7->pe) {
				printk(KERN_ERR "Too many IO7s at PE %d\n",
				       io7->pe);
				return NULL;
			}

			if (NULL == insp->next ||
			    insp->next->pe > io7->pe) {
				io7->next = insp->next;
				insp->next = io7;
				break;
			}
		}

		if (NULL == insp) {
			printk(KERN_WARNING "Failed to insert IO7 at PE %d"
			       " - adding at head of list\n", io7->pe);
			io7->next = io7_head;
			io7_head = io7;
		}
	}

	return io7;
}

void
io7_clear_errors(struct io7 *io7)
{
	io7_port7_csrs *p7csrs;
	io7_ioport_csrs *csrs;
	int port;

	/*
	 * First the per-port error CSRs.
	 */
	for (port = 0; port < 4; port++) {
		csrs = IO7_CSRS_KERN(io7->pe, port);

		csrs->POx_ERR_SUM.csr = -1UL;
		csrs->POx_TLB_ERR.csr = -1UL;
		csrs->POx_SPL_COMPLT.csr = -1UL;
		csrs->POx_TRANS_SUM.csr = -1UL;
	}

	/*
	 * Then the common (port 7) error CSRs.
	 */
	p7csrs = IO7_PORT7_CSRS_KERN(io7->pe);

	p7csrs->PO7_ERROR_SUM.csr = -1UL;
	p7csrs->PO7_UNCRR_SYM.csr = -1UL;
	p7csrs->PO7_CRRCT_SYM.csr = -1UL;
}

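/*
 * IO7 PCI, PCI/X, AGP configuration.
 */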
static void __init
io7_init_hose(struct io7 *io7, int port)
{
	static int hose_index = 0;

	struct pci_controller *hose = alloc_pci_controller();
	struct io7_port *io7_port = &io7->ports[port];
	io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, port);
	int i;

	hose->index = hose_index++;

	/*
	 * There is no dedicated ISA hose; let the first hose found
	 * stand in for legacy (ISA) accesses.
	 */
	if (hose->index == 0)
		pci_isa_hose = hose;

	io7_port->csrs = csrs;
	io7_port->hose = hose;
	hose->sysdata = io7_port;

	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/*
	 * Base addresses for userland consumption.  There is no sparse
	 * space; the dense bases are the physical addresses of this
	 * port's MEM and IO spaces.
	 */
	hose->sparse_mem_base = hose->sparse_io_base = 0;
	hose->dense_mem_base = IO7_MEM_PHYS(io7->pe, port);
	hose->dense_io_base = IO7_IO_PHYS(io7->pe, port);

	/*
	 * Base addresses and resource ranges for kernel consumption.
	 */
	hose->config_space_base = (unsigned long)IO7_CONF_KERN(io7->pe, port);

	hose->io_space->start = (unsigned long)IO7_IO_KERN(io7->pe, port);
	hose->io_space->end = hose->io_space->start + IO7_IO_SPACE - 1;
	hose->io_space->name = mk_resource_name(io7->pe, port, "IO");
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = (unsigned long)IO7_MEM_KERN(io7->pe, port);
	hose->mem_space->end = hose->mem_space->start + IO7_MEM_SPACE - 1;
	hose->mem_space->name = mk_resource_name(io7->pe, port, "MEM");
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n",
		       hose->index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n",
		       hose->index);

	/*
	 * Save the existing DMA window settings for later restoration.
	 */
	for (i = 0; i < 4; i++) {
		io7_port->saved_wbase[i] = csrs->POx_WBASE[i].csr;
		io7_port->saved_wmask[i] = csrs->POx_WMASK[i].csr;
		io7_port->saved_tbase[i] = csrs->POx_TBASE[i].csr;
	}

	/*
	 * Set up the PCI to main memory translation windows:
	 *
	 *   window 0 - scatter-gather, 8MB at 8MB (for ISA)
	 *   window 1 - direct-mapped, 1GB at 2GB
	 *   window 2 - scatter-gather, (up to) 1GB at 3GB
	 *   window 3 - disabled
	 */

	/*
	 * TBIA before modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);

	/*
	 * Window 0 - scatter-gather, 8MB at 8MB (for ISA).
	 */
	hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0x00800000, 0x00800000, 0);
	hose->sg_isa->align_entry = 8;
	csrs->POx_WBASE[0].csr =
		hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[0].csr = (hose->sg_isa->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes);

	/*
	 * Window 1 - direct-mapped, 1GB at 2GB.
	 */
	csrs->POx_WBASE[1].csr = __direct_map_base | wbase_m_ena;
	csrs->POx_WMASK[1].csr = (__direct_map_size - 1) & wbase_m_addr;
	csrs->POx_TBASE[1].csr = 0;

	/*
	 * Window 2 - scatter-gather, (up to) 1GB at 3GB.
	 */
	hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0xc0000000, 0x40000000, 0);
	hose->sg_pci->align_entry = 8;
	csrs->POx_WBASE[2].csr =
		hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[2].csr = (hose->sg_pci->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes);

	/*
	 * Window 3 - disabled.
	 */
	csrs->POx_WBASE[3].csr = 0;

	/*
	 * Make sure the AGP Monster Window is disabled.
	 */
	csrs->POx_CTRL.csr &= ~(1UL << 61);

#if 1
	printk("FIXME: disabling master aborts\n");
	csrs->POx_MSK_HEI.csr &= ~(3UL << 14);
#endif

	/*
	 * TBIA after modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);
}

static void __init
marvel_init_io7(struct io7 *io7)
{
	int i;

	printk("Initializing IO7 at PID %d\n", io7->pe);

	/*
	 * Get the Port 7 CSR pointer.
	 */
	io7->csrs = IO7_PORT7_CSRS_KERN(io7->pe);

	/*
	 * Init this IO7's hoses.
	 */
	for (i = 0; i < IO7_NUM_PORTS; i++) {
		io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, i);
		if (csrs->POx_CACHE_CTL.csr == 8) {
			io7->ports[i].enabled = 1;
			io7_init_hose(io7, i);
		}
	}
}

void
marvel_io7_present(gct6_node *node)
{
	int pe;

	if (node->type != GCT_TYPE_HOSE ||
	    node->subtype != GCT_SUBTYPE_IO_PORT_MODULE)
		return;

	pe = (node->id >> 8) & 0xff;
	printk("Found an IO7 at PID %d\n", pe);

	alloc_io7(pe);
}

static void __init
marvel_find_console_vga_hose(void)
{
	u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);

	if (pu64[7] == 3) {	/* console terminal type == graphics */
		struct pci_controller *hose = NULL;
		int h = (pu64[30] >> 24) & 0xff;	/* console's hose # */
		struct io7 *io7;
		int pid, port;

		printk("console graphics is on hose %d (console)\n", h);

		/*
		 * The console's hose numbering is:
		 *
		 *	hose<n:2>: PID
		 *	hose<1:0>: PORT
		 *
		 * We need to find the hose at that pid and port.
		 */
		pid = h >> 2;
		port = h & 3;
		if ((io7 = marvel_find_io7(pid)))
			hose = io7->ports[port].hose;

		if (hose) {
			printk("Console graphics on hose %d\n", hose->index);
			pci_vga_hose = hose;
		}
	}
}

gct6_search_struct gct_wanted_node_list[] = {
	{ GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
	{ 0, 0, NULL }
};

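/*
 * In case the GCT is incomplete, let the user specify the PIDs with
 * IO7s at boot time ("io7=a,b,c,...").
 */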
static int __init
marvel_specify_io7(char *str)
{
	unsigned long pid;
	struct io7 *io7;
	char *pchar;

	do {
		pid = simple_strtoul(str, &pchar, 0);
		if (pchar != str) {
			printk("User-specified IO7 at PID %lu\n", pid);
			io7 = alloc_io7(pid);
			if (io7) marvel_init_io7(io7);
		}

		if (pchar == str) pchar++;
		str = pchar;
	} while(*str);

	return 1;
}
__setup("io7=", marvel_specify_io7);

void __init
marvel_init_arch(void)
{
	struct io7 *io7;

	/* With multiple PCI buses, we play with I/O as physical addrs.  */
	ioport_resource.end = ~0UL;

	/* PCI DMA direct mapping is 1GB at 2GB.  */
	__direct_map_base = 0x80000000;
	__direct_map_size = 0x40000000;

	/* Parse the config tree.  */
	gct6_find_nodes(GCT_NODE_PTR(0), gct_wanted_node_list);

	/* Init the io7s.  */
	for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); )
		marvel_init_io7(io7);

	/* Check for graphic console location (if any).  */
	marvel_find_console_vga_hose();
}

void
marvel_kill_arch(int mode)
{
}

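/*
 * PCI configuration space access functions.
 *
 * Configuration space addresses built by mk_conf_addr() have the form:
 *
 *	config_space_base | bus<23:16> | devfn<15:8> | register<7:0>
 *
 * i.e., the 8-bit bus number, the 8-bit devfn (5-bit device, 3-bit
 * function) and the 8-bit register offset are OR'd below the hose's
 * config space base address.
 */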
static inline unsigned long
build_conf_addr(struct pci_controller *hose, u8 bus,
		unsigned int devfn, int where)
{
	return (hose->config_space_base | (bus << 16) | (devfn << 8) | where);
}

static unsigned long
mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where)
{
	struct pci_controller *hose = pbus->sysdata;
	struct io7_port *io7_port;
	unsigned long addr = 0;
	u8 bus = pbus->number;

	if (!hose)
		return addr;

	/* Check that this hose's port is enabled.  */
	io7_port = hose->sysdata;
	if (!io7_port->enabled)
		return addr;

	if (!pbus->parent) {	/* root bus on this hose */
		/* Don't probe device numbers 21 and up on the root bus.  */
		if (devfn >= PCI_DEVFN(21, 0))
			return addr;
		bus = 0;
	}

	addr = build_conf_addr(hose, bus, devfn, where);

	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return addr;
}

static int
marvel_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 *value)
{
	unsigned long addr;

	if (0 == (addr = mk_conf_addr(bus, devfn, where)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch(size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int
marvel_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 value)
{
	unsigned long addr;

	if (0 == (addr = mk_conf_addr(bus, devfn, where)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops marvel_pci_ops =
{
	.read = marvel_read_config,
	.write = marvel_write_config,
};

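/*
 * DMA scatter-gather TLB invalidate.  The write to POx_SG_TBIA flushes
 * the port's cached PTEs; the start/end arguments are ignored.
 */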
void
marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)hose->sysdata)->csrs;

	wmb();
	csrs->POx_SG_TBIA.csr = 0;
	mb();
	csrs->POx_SG_TBIA.csr;
}

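/*
 * RTC support.  The TOY clock is reached through PALcode cserve calls,
 * so accesses to the legacy RTC index/data ports (0x70/0x71) are
 * emulated on top of that.
 */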
struct marvel_rtc_access_info {
	unsigned long function;
	unsigned long index;
	unsigned long data;
};

static void
__marvel_access_rtc(void *info)
{
	struct marvel_rtc_access_info *rtc_access = info;

	register unsigned long __r0 __asm__("$0");
	register unsigned long __r16 __asm__("$16") = rtc_access->function;
	register unsigned long __r17 __asm__("$17") = rtc_access->index;
	register unsigned long __r18 __asm__("$18") = rtc_access->data;

	__asm__ __volatile__(
		"call_pal %4 # cserve rtc"
		: "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0)
		: "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18)
		: "$1", "$22", "$23", "$24", "$25");

	rtc_access->data = __r0;
}

static u8
__marvel_rtc_io(u8 b, unsigned long addr, int write)
{
	static u8 index = 0;

	struct marvel_rtc_access_info rtc_access;
	u8 ret = 0;

	switch(addr) {
	case 0x70:		/* RTC index port */
		if (write) index = b;
		ret = index;
		break;

	case 0x71:		/* RTC data port */
		rtc_access.index = index;
		rtc_access.data = bcd2bin(b);
		rtc_access.function = 0x48 + !write;	/* 0x48 write, 0x49 read */

		__marvel_access_rtc(&rtc_access);

		ret = bin2bcd(rtc_access.data);
		break;

	default:
		printk(KERN_WARNING "Illegal RTC port %lx\n", addr);
		break;
	}

	return ret;
}

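/*
 * I/O mapping support.
 */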
void __iomem *
marvel_ioremap(unsigned long addr, unsigned long size)
{
	struct pci_controller *hose;
	unsigned long baddr, last;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

	/*
	 * Adjust the address.
	 */
	FIXUP_MEMADDR_VGA(addr);

	/*
	 * Find the hose.
	 */
	for (hose = hose_head; hose; hose = hose->next) {
		if ((addr >> 32) == (hose->mem_space->start >> 32))
			break;
	}
	if (!hose)
		return NULL;

	/*
	 * We have the hose - calculate the bus limits.
	 */
	baddr = addr - hose->mem_space->start;
	last = baddr + size - 1;

	/*
	 * Is it direct-mapped?
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
		addr = IDENT_ADDR | (baddr - __direct_map_base);
		return (void __iomem *) addr;
	}

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {

		/*
		 * Adjust the limits (mappings must be page aligned).
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		baddr &= PAGE_MASK;
		size = PAGE_ALIGN(last) - baddr;

		/*
		 * Map it.
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		     baddr <= last;
		     baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			pfn = ptes[baddr >> PAGE_SHIFT];
			if (!(pfn & 1)) {
				printk("ioremap failed... pte not valid...\n");
				vfree(area->addr);
				return NULL;
			}
			pfn >>= 1;	/* make it a true pfn */

			if (__alpha_remap_area_pages(vaddr,
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to map...\n");
				vfree(area->addr);
				return NULL;
			}
		}

		flush_tlb_all();

		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);

		return (void __iomem *) vaddr;
	}

	/* Assume it was already a reasonable address.  */
	vaddr = baddr + hose->mem_space->start;
	return (void __iomem *) vaddr;
}

void
marvel_iounmap(volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (addr >= VMALLOC_START)
		vfree((void *)(PAGE_MASK & addr));
}


int
marvel_is_mmio(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (addr >= VMALLOC_START)
		return 1;
	else
		return (addr & 0xFF000000UL) == 0;
}

#define __marvel_is_port_kbd(a)	(((a) == 0x60) || ((a) == 0x64))
#define __marvel_is_port_rtc(a)	(((a) == 0x70) || ((a) == 0x71))

void __iomem *marvel_ioportmap (unsigned long addr)
{
	FIXUP_IOADDR_VGA(addr);
	return (void __iomem *)addr;
}

unsigned int
marvel_ioread8(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (__marvel_is_port_kbd(addr))
		return 0;
	else if (__marvel_is_port_rtc(addr))
		return __marvel_rtc_io(0, addr, 0);
	else if (marvel_is_ioaddr(addr))
		return __kernel_ldbu(*(vucp)addr);
	else
		/* No device behind other legacy ports; read as all ones.  */
		return ~0;
}

void
marvel_iowrite8(u8 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (__marvel_is_port_kbd(addr))
		return;
	else if (__marvel_is_port_rtc(addr))
		__marvel_rtc_io(b, addr, 1);
	else if (marvel_is_ioaddr(addr))
		__kernel_stb(b, *(vucp)addr);
}

#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(marvel_ioremap);
EXPORT_SYMBOL(marvel_iounmap);
EXPORT_SYMBOL(marvel_is_mmio);
EXPORT_SYMBOL(marvel_ioportmap);
EXPORT_SYMBOL(marvel_ioread8);
EXPORT_SYMBOL(marvel_iowrite8);
#endif

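/*
 * NUMA support: map physical addresses and CPU ids to node ids.
 */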
int
marvel_pa_to_nid(unsigned long pa)
{
	int cpuid;

	if ((pa >> 43) & 1)	/* I/O */
		cpuid = (~(pa >> 35) & 0xff);
	else			/* mem */
		cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2));

	return marvel_cpuid_to_nid(cpuid);
}

int
marvel_cpuid_to_nid(int cpuid)
{
	return cpuid;
}

unsigned long
marvel_node_mem_start(int nid)
{
	unsigned long pa;

	pa = (nid & 0x3) | ((nid & (0x1f << 2)) << 1);
	pa <<= 34;

	return pa;
}

unsigned long
marvel_node_mem_size(int nid)
{
	return 16UL * 1024 * 1024 * 1024;	/* 16GB */
}

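/*
 * AGP GART support.
 */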
#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>

struct marvel_agp_aperture {
	struct pci_iommu_arena *arena;
	long pg_start;
	long pg_count;
};

static int
marvel_agp_setup(alpha_agp_info *agp)
{
	struct marvel_agp_aperture *aper;

	if (!alpha_agpgart_size)
		return -ENOMEM;

	aper = kmalloc(sizeof(*aper), GFP_KERNEL);
	if (aper == NULL) return -ENOMEM;

	aper->arena = agp->hose->sg_pci;
	aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
	aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
				       aper->pg_count - 1);

	if (aper->pg_start < 0) {
		printk(KERN_ERR "Failed to reserve AGP memory\n");
		kfree(aper);
		return -ENOMEM;
	}

	agp->aperture.bus_base =
		aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
	agp->aperture.size = aper->pg_count * PAGE_SIZE;
	agp->aperture.sysdata = aper;

	return 0;
}

static void
marvel_agp_cleanup(alpha_agp_info *agp)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	int status;

	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	if (status == -EBUSY) {
		printk(KERN_WARNING
		       "Attempted to release bound AGP memory - unbinding\n");
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		status = iommu_release(aper->arena, aper->pg_start,
				       aper->pg_count);
	}
	if (status < 0)
		printk(KERN_ERR "Failed to release AGP memory\n");

	kfree(aper);
	kfree(agp);
}

static int
marvel_agp_configure(alpha_agp_info *agp)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)agp->hose->sysdata)->csrs;
	struct io7 *io7 = ((struct io7_port *)agp->hose->sysdata)->io7;
	unsigned int new_rate = 0;
	unsigned long agp_pll;

	/*
	 * Check the requested mode against the PLL setting.
	 * The AGP backend has not programmed the card yet,
	 * so we can still tweak the mode here.
	 */
	agp_pll = io7->csrs->POx_RST[IO7_AGP_PORT].csr;
	switch(IO7_PLL_RNGB(agp_pll)) {
	case 0x4:	/* 2x only */
		/*
		 * The PLL is only programmed for 2x, so adjust the
		 * rate to 2x, if necessary.
		 */
		if (agp->mode.bits.rate != 2)
			new_rate = 2;
		break;

	case 0x6:	/* 1x / 4x */
		/*
		 * The PLL is programmed for 1x or 4x.  Don't go faster
		 * than requested, so if the requested rate is 2x, use 1x.
		 */
		if (agp->mode.bits.rate == 2)
			new_rate = 1;
		break;

	default:
		/*
		 * Don't know what this PLL setting is; take the requested
		 * rate, but warn the user.
		 */
		printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n",
		       __func__, IO7_PLL_RNGB(agp_pll), agp_pll);
		break;
	}

	/*
	 * Set the new rate, if necessary.
	 */
	if (new_rate) {
		printk("Requested AGP Rate %dX not compatible "
		       "with PLL setting - using %dX\n",
		       agp->mode.bits.rate,
		       new_rate);

		agp->mode.bits.rate = new_rate;
	}

	printk("Enabling AGP on hose %d: %dX%s RQ %d\n",
	       agp->hose->index, agp->mode.bits.rate,
	       agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq);

	csrs->AGP_CMD.csr = agp->mode.lw;

	return 0;
}

static int
marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_bind(aper->arena, aper->pg_start + pg_start,
			  mem->page_count, mem->pages);
}

static int
marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_unbind(aper->arena, aper->pg_start + pg_start,
			    mem->page_count);
}

static unsigned long
marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	unsigned long baddr = addr - aper->arena->dma_base;
	unsigned long pte;

	if (addr < agp->aperture.bus_base ||
	    addr >= agp->aperture.bus_base + agp->aperture.size) {
		printk("%s: addr out of range\n", __func__);
		return -EINVAL;
	}

	pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
	if (!(pte & 1)) {
		printk("%s: pte not valid\n", __func__);
		return -EINVAL;
	}
	return (pte >> 1) << PAGE_SHIFT;
}

struct alpha_agp_ops marvel_agp_ops =
{
	.setup = marvel_agp_setup,
	.cleanup = marvel_agp_cleanup,
	.configure = marvel_agp_configure,
	.bind = marvel_agp_bind_memory,
	.unbind = marvel_agp_unbind_memory,
	.translate = marvel_agp_translate
};

alpha_agp_info *
marvel_agp_info(void)
{
	struct pci_controller *hose;
	io7_ioport_csrs *csrs;
	alpha_agp_info *agp;
	struct io7 *io7;

	/*
	 * Find the first enabled AGP port with something present
	 * at device 5, function 0.
	 */
	hose = NULL;
	for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) {
		struct pci_controller *h;
		vuip addr;

		if (!io7->ports[IO7_AGP_PORT].enabled)
			continue;

		h = io7->ports[IO7_AGP_PORT].hose;
		addr = (vuip)build_conf_addr(h, 0, PCI_DEVFN(5, 0), 0);

		if (*addr != 0xffffffffu) {
			hose = h;
			break;
		}
	}

	if (!hose || !hose->sg_pci)
		return NULL;

	printk("MARVEL - using hose %d as AGP\n", hose->index);

	/*
	 * Get the CSRs from the hose.
	 */
	csrs = ((struct io7_port *)hose->sysdata)->csrs;

	/*
	 * Allocate the info structure.
	 */
	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
	if (!agp)
		return NULL;

	/*
	 * Fill it in.
	 */
	agp->hose = hose;
	agp->private = NULL;
	agp->ops = &marvel_agp_ops;

	/*
	 * Aperture - not configured until ops.setup().
	 */
	agp->aperture.bus_base = 0;
	agp->aperture.size = 0;
	agp->aperture.sysdata = NULL;

	/*
	 * Capabilities.
	 *
	 * Note: cap the advertised read queue depth at 16 (rq = 0xf),
	 * overriding whatever AGP_STAT reports.
	 */
	agp->capability.lw = csrs->AGP_STAT.csr;
	agp->capability.bits.rq = 0xf;

	/*
	 * Mode.
	 */
	agp->mode.lw = csrs->AGP_CMD.csr;

	return agp;
}