/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows to use PCI devices that only support 32bit addresses on systems
 * with more than 4GB.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

static dma_addr_t bad_dma_addr;

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular NVidia cards. Note it's not entirely
 * clear whether the problem is in the lazy flush or the GART bugs.
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
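
/*
 * Editorial worked example of the GART PTE format (illustrative values,
 * not from the original source): a GART PTE is 32 bits wide, so physical
 * address bits 39:32 are folded into PTE bits 11:4.
 *
 *   phys = 0x12 3456 7000   (bits 39:32 = 0x12, bits 31:12 = 0x34567)
 *
 *   GPTE_ENCODE(phys)
 *     = (phys & 0xfffff000)          -> 0x34567000
 *     | ((phys >> 32) << 4)          -> 0x00000120
 *     | GPTE_VALID | GPTE_COHERENT   -> 0x00000003
 *     = 0x34567123
 *
 *   GPTE_DECODE(0x34567123)
 *     = 0x34567000 | (0x120 << 28)   -> 0x12 3456 7000
 */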

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
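
/*
 * Editorial note: alloc_iommu() is a next-fit allocator. The first pass
 * searches forward from next_bit; only if that fails does it wrap around
 * to bit 0, and any wrap (or an explicit iommu_fullflush) forces a GART
 * TLB flush via need_flush before freed entries are reused. For example
 * (illustrative numbers), with iommu_pages == 0x8000 and
 * next_bit == 0x7ff8, a 0x20-page request fails the first pass, sets
 * need_flush, and retries from bit 0.
 */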

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that much.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}

/*
 * Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return bad_dma_addr;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_addr;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
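
/*
 * Editorial note: the bus address returned above is composed of the
 * aperture base, the allocated GART page, and the intra-page offset
 * (which is unchanged by the PAGE_SIZE increments in the loop). E.g.
 * with illustrative values iommu_bus_base == 0xd0000000, iommu_page == 5
 * and a page offset of 0x340, the device sees
 * 0xd0000000 + 5*4096 + 0x340 = 0xd0005340.
 */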

/* Map a single area into the IOMMU: */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_addr) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries continuous into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 *
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_addr;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_addr) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
						  attrs);

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}

static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == bad_dma_addr);
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
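
/*
 * Editorial worked example of the register decoding above (illustrative
 * values): the CTL register carries the aperture order in bits [3:1],
 * so an order of 3 means a 32MB << 3 = 256MB aperture. The BASE register
 * holds physical address bits [39:25], so a raw value of 0x40 decodes to
 * 0x40 << 25 = 0x80000000 (2GB); the aperture is thus always
 * 32MB-aligned.
 */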

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up
 * on resume: the GART aperture registers may not survive suspend, so
 * they are restored from the values saved at boot.
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}
598
599static void gart_fixup_northbridges(void)
600{
601 int i;
602
603 if (!fix_up_north_bridges)
604 return;
605
606 if (!amd_nb_has_feature(AMD_NB_GART))
607 return;
608
609 pr_info("PCI-DMA: Restoring GART aperture settings\n");
610
611 for (i = 0; i < amd_nb_num(); i++) {
612 struct pci_dev *dev = node_to_amd_nb(i)->misc;
613
614
615
616
617
618 gart_set_size_and_enable(dev, aperture_order);
619 pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
620 }
621}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size >> 10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
		   "falling back to iommu=soft.\n");
	return -1;
}

static struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mapping_error			= gart_mapping_error,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shutdown the GART in case of AGP */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	bad_dma_addr		= iommu_bus_base;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);
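
	/*
	 * Editorial worked example of the layout above (illustrative
	 * numbers): with a 512MB aperture at 0xc0000000 and an iommu_size
	 * of 256MB, the IOMMU area occupies the upper half of the
	 * aperture, so iommu_start = 256MB, iommu_bus_base = 0xd0000000,
	 * and the remapping PTEs begin 65536 entries (256MB >> PAGE_SHIFT)
	 * into the GATT. bad_dma_addr points at the first emergency page.
	 */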

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);

	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it sure and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * scribbling.
	 */
	enable_gart_translations();

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reserved regions too.)
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
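
/*
 * Editorial usage note: the flags above are sub-options of the iommu=
 * boot parameter (see the x86_64 boot-options documentation). For
 * example, booting with "iommu=fullflush" forces a GART TLB flush on
 * every mapping, and "iommu=memaper=3" forces a fallback aperture of
 * 32MB << 3 = 256MB.
 */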
IOMMU_INIT_POST(gart_iommu_hole_init);