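/*
 * AGP GART driver for the AMD Opteron/Athlon64 on-CPU northbridge.
 * It also handles the AMD 8151 AGP bridge, although the real work is
 * done in the northbridge(s); the 8151 itself needs very little setup.
 * ULi and NVIDIA nForce3 AGP tunnels are supported as well by shadowing
 * the northbridge aperture into their own registers.
 */
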
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/mmzone.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/amd_nb.h>
#include <asm/gart.h>
#include "agp.h"

/* NVIDIA nForce3 (K8) registers */
#define NVIDIA_X86_64_0_APBASE		0x10
#define NVIDIA_X86_64_1_APBASE1		0x50
#define NVIDIA_X86_64_1_APLIMIT1	0x54
#define NVIDIA_X86_64_1_APSIZE		0xa8
#define NVIDIA_X86_64_1_APBASE2		0xd8
#define NVIDIA_X86_64_1_APLIMIT2	0xdc

/* ULi K8 registers */
#define ULI_X86_64_BASE_ADDR		0x10
#define ULI_X86_64_HTT_FEA_REG		0x50
#define ULI_X86_64_ENU_SCR_REG		0x54

static struct resource *aperture_resource;
static bool __initdata agp_try_unsupported = 1;
static int agp_bridges_found;

static void amd64_tlbflush(struct agp_memory *temp)
{
	amd_flush_garts();
}

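/*
 * Map an agp_memory's pages into the GART starting at pg_start:
 * validate the range, check that the target GATT slots are empty,
 * write one GART PTE per page (reading it back to post the write),
 * then flush the GART TLBs in all northbridges.
 */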
static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	long long tmp;
	int mask_type;
	struct agp_bridge_data *bridge = mem->bridge;
	u32 pte;

	num_entries = agp_num_entries();

	if (type != mem->type)
		return -EINVAL;
	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0)
		return -EINVAL;

	/* Make sure the range fits in the GATT table. */
	if (((unsigned long)pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* The GATT slots we are about to fill must be empty. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		tmp = agp_bridge->driver->mask_memory(agp_bridge,
						      page_to_phys(mem->pages[i]),
						      mask_type);

		BUG_ON(tmp & 0xffffff0000000ffcULL);
		/* GART PTE: physical address bits 39:32 go into PTE bits 11:4,
		   bits 31:12 stay in place, plus the valid/coherent flags. */
		pte = (tmp & 0x000000ff00000000ULL) >> 28;
		pte |= (tmp & 0x00000000fffff000ULL);
		pte |= GPTE_VALID | GPTE_COHERENT;

		writel(pte, agp_bridge->gatt_table+j);
		readl(agp_bridge->gatt_table+j);	/* PCI posting */
	}
	amd64_tlbflush(mem);
	return 0;
}
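
/*
 * Aperture size table. The page-order element is biased by sizeof(long)/8
 * so the generic GATT code allocates a table of the size it expects on
 * this platform.
 */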
static struct aper_size_info_32 amd64_aperture_sizes[7] =
{
	{32,   8192,   3+(sizeof(long)/8), 0 },
	{64,   16384,  4+(sizeof(long)/8), 1<<1 },
	{128,  32768,  5+(sizeof(long)/8), 1<<2 },
	{256,  65536,  6+(sizeof(long)/8), 1<<1 | 1<<2 },
	{512,  131072, 7+(sizeof(long)/8), 1<<3 },
	{1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
	{2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
};
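
/*
 * Read the current aperture size from the first northbridge only; on
 * multi-node systems all northbridges are expected to be programmed
 * identically.
 */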
static int amd64_fetch_size(void)
{
	struct pci_dev *dev;
	int i;
	u32 temp;
	struct aper_size_info_32 *values;

	dev = node_to_amd_nb(0)->misc;
	if (dev == NULL)
		return 0;

	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
	temp = (temp & 0xe);
	values = A_SIZE_32(amd64_aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
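
/*
 * Program a single northbridge: point its GART at our GATT and enable
 * translation, returning the aperture base it has been assigned.
 * Called once per northbridge on multi-node systems.
 */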
static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
{
	u64 aperturebase;
	u32 tmp;
	u64 aper_base;

	/* Address the aperture is mapped to. Widen before shifting so a
	   base above 4GB is not truncated. */
	pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
	aperturebase = (u64)tmp << 25;
	aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);

	enable_gart_translation(hammer, gatt_table);

	return aper_base;
}

static const struct aper_size_info_32 amd_8151_sizes[7] =
{
	{2048, 524288, 9, 0x00000000 },
	{1024, 262144, 8, 0x00000400 },
	{512,  131072, 7, 0x00000600 },
	{256,  65536,  6, 0x00000700 },
	{128,  32768,  5, 0x00000720 },
	{64,   16384,  4, 0x00000730 },
	{32,   8192,   3, 0x00000738 }
};

static int amd_8151_configure(void)
{
	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	/* Configure the GART in every x86-64 host bridge (northbridge). */
	for (i = 0; i < amd_nb_num(); i++) {
		agp_bridge->gart_bus_addr =
			amd64_configure(node_to_amd_nb(i)->misc, gatt_bus);
	}
	amd_flush_garts();
	return 0;
}

static void amd64_cleanup(void)
{
	u32 tmp;
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/* Disable GART translation */
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
		tmp &= ~GARTEN;
		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
	}
}

static const struct agp_bridge_driver amd_8151_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= amd_8151_sizes,
	.size_type		= U32_APER_SIZE,
	.num_aperture_sizes	= 7,
	.needs_scratch_page	= true,
	.configure		= amd_8151_configure,
	.fetch_size		= amd64_fetch_size,
	.cleanup		= amd64_cleanup,
	.tlb_flush		= amd64_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= amd64_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};

/* Basic sanity checks for the aperture. */
static int __devinit agp_aperture_valid(u64 aper, u32 size)
{
	if (!aperture_valid(aper, size, 32*1024*1024))
		return 0;

	/*
	 * Request the aperture region. This catches the case where someone
	 * else (for example a broken BIOS mapping) already claimed it.
	 */
	if (!aperture_resource &&
	    !(aperture_resource = request_mem_region(aper, size, "aperture"))) {
		printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n");
		return 0;
	}
	return 1;
}
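
/*
 * Some BIOSes set up the aperture only in the AGP bridge and not in the
 * northbridge. On x86-64 the early aperture code normally fixes this up,
 * but when the GART IOMMU is not in use (or on a 32-bit kernel) it must
 * be redone here. It is too late to allocate a fresh aperture at this
 * point, so if nothing usable is found we fail cleanly rather than
 * crashing.
 */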
static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
				     u16 cap)
{
	u32 aper_low, aper_hi;
	u64 aper, nb_aper;
	int order = 0;
	u32 nb_order, nb_base;
	u16 apsize;

	pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
	nb_order = (nb_order >> 1) & 7;
	pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
	/* Widen before shifting so a base above 4GB is not truncated. */
	nb_aper = (u64)nb_base << 25;

	/* The northbridge values may be unreliable; also consult the AGP
	   bridge's APSIZE register. */
	pci_read_config_word(agp, cap+0x14, &apsize);
	if (apsize == 0xffff) {
		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
			return 0;
		return -1;
	}

	apsize &= 0xfff;
	/* Some BIOSes use weird encodings that are not in the AGPv3 table. */
	if (apsize & 0xff)
		apsize |= 0xf00;
	order = 7 - hweight16(apsize);

	pci_read_config_dword(agp, 0x10, &aper_low);
	pci_read_config_dword(agp, 0x14, &aper_hi);
	aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);

	/*
	 * If the aperture reported by the AGP bridge extends beyond 4GB it
	 * cannot be right, so fall back to the northbridge settings.
	 */
	if (order >= 0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
		dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
			 32 << order);
		order = nb_order;
	}

	if (nb_order >= order) {
		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
			return 0;
	}

	dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
		 aper, 32 << order);
	if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
		return -1;

	gart_set_size_and_enable(nb, order);
	pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);

	return 0;
}

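/*
 * Check every northbridge for a usable aperture before registering the
 * bridge, caching the northbridge list via amd_cache_northbridges().
 */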
static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
{
	int i;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return -ENODEV;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;
		if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
			dev_err(&dev->dev, "no usable aperture found\n");
#ifdef __x86_64__
			/* The iommu=memaper option only exists on x86-64. */
			dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
#endif
			return -1;
		}
	}
	return 0;
}

static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
{
	char *revstring;

	switch (pdev->revision) {
	case 0x01: revstring="A0"; break;
	case 0x02: revstring="A1"; break;
	case 0x11: revstring="B0"; break;
	case 0x12: revstring="B1"; break;
	case 0x13: revstring="B2"; break;
	case 0x14: revstring="B3"; break;
	default:   revstring="??"; break;
	}

	dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);

	/*
	 * Work around an 8151 erratum: steppings before B2 report AGP 3.5
	 * even though they only implement AGP 3.0.
	 */
	if (pdev->revision < 0x13) {
		dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
		bridge->major_version = 3;
		bridge->minor_version = 0;
	}
}

static const struct aper_size_info_32 uli_sizes[7] =
{
	{256, 65536, 6, 10},
	{128, 32768, 5, 9},
	{64,  16384, 4, 8},
	{32,  8192,  3, 7},
	{16,  4096,  2, 6},
	{8,   2048,  1, 4},
	{4,   1024,  0, 3}
};
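
/*
 * ULi chipsets shadow the northbridge aperture: the GART aperture base is
 * copied into the ULi host bridge BAR, and the matching base/limit pair is
 * programmed into the companion device at devfn 00.0.
 */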
static int __devinit uli_agp_init(struct pci_dev *pdev)
{
	u32 httfea, baseaddr, enuscr;
	struct pci_dev *dev1;
	int i, ret;
	unsigned size = amd64_fetch_size();

	dev_info(&pdev->dev, "setting up ULi AGP\n");
	dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(0, 0));
	if (dev1 == NULL) {
		dev_info(&pdev->dev, "can't find ULi secondary device\n");
		return -ENODEV;
	}

	for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
		if (uli_sizes[i].size == size)
			break;

	if (i == ARRAY_SIZE(uli_sizes)) {
		dev_info(&pdev->dev, "no ULi size found for %d\n", size);
		ret = -ENODEV;
		goto put;
	}

	/* Shadow the x86-64 aperture registers into the ULi registers. */
	pci_read_config_dword(node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
			      &httfea);

	/* If the x86-64 aperture base is beyond 4G, exit here. */
	if ((httfea & 0x7fff) >> (32 - 25)) {
		ret = -ENODEV;
		goto put;
	}

	httfea = (httfea & 0x7fff) << 25;

	pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
	baseaddr &= ~PCI_BASE_ADDRESS_MEM_MASK;
	baseaddr |= httfea;
	pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);

	enuscr = httfea + (size * 1024 * 1024) - 1;
	pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
	pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
	ret = 0;
put:
	pci_dev_put(dev1);
	return ret;
}

static const struct aper_size_info_32 nforce3_sizes[5] =
{
	{512, 131072, 7, 0x00000000 },
	{256, 65536,  6, 0x00000008 },
	{128, 32768,  5, 0x0000000C },
	{64,  16384,  4, 0x0000000E },
	{32,  8192,   3, 0x0000000F }
};
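
/*
 * Handle the shadow device of the NVIDIA nForce3: program the aperture size
 * into the shadow device at devfn 0b.0 and mirror the northbridge aperture
 * base/limit into both of its shadow windows.
 */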
static int nforce3_agp_init(struct pci_dev *pdev)
{
	u32 tmp, apbase, apbar, aplimit;
	struct pci_dev *dev1;
	int i, ret;
	unsigned size = amd64_fetch_size();

	dev_info(&pdev->dev, "setting up Nforce3 AGP\n");

	dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
	if (dev1 == NULL) {
		dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
		return -ENODEV;
	}

	for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++)
		if (nforce3_sizes[i].size == size)
			break;

	if (i == ARRAY_SIZE(nforce3_sizes)) {
		dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
		ret = -ENODEV;
		goto put;
	}

	pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
	tmp &= ~(0xf);
	tmp |= nforce3_sizes[i].size_value;
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);

	/* Shadow the x86-64 aperture registers into the NVIDIA registers. */
	pci_read_config_dword(node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
			      &apbase);

	/* If the x86-64 aperture base is beyond 4G, exit here. */
	if ((apbase & 0x7fff) >> (32 - 25)) {
		dev_info(&pdev->dev, "aperture base > 4G\n");
		ret = -ENODEV;
		goto put;
	}

	apbase = (apbase & 0x7fff) << 25;

	pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar);
	apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
	apbar |= apbase;
	pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar);

	aplimit = apbase + (size * 1024 * 1024) - 1;
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);

	ret = 0;
put:
	pci_dev_put(dev1);

	return ret;
}

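/*
 * Probe: only the first AGP-capable host bridge is used. The northbridge
 * apertures are validated (and fixed up if necessary) first, then the
 * chipset-specific nForce3/ULi setup runs before the bridge is registered
 * with the AGP core.
 */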
static int __devinit agp_amd64_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;
	int err;

	/* The Highlander principle: there can be only one bridge. */
	if (agp_bridges_found)
		return -ENODEV;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
	    pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
		amd8151_init(pdev, bridge);
	} else {
		dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
			 pdev->vendor, pdev->device);
	}

	bridge->driver = &amd_8151_driver;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	/* Fill in the mode register */
	pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);

	if (cache_nbs(pdev, cap_ptr) == -1) {
		agp_put_bridge(bridge);
		return -ENODEV;
	}

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
		int ret = nforce3_agp_init(pdev);
		if (ret) {
			agp_put_bridge(bridge);
			return ret;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_AL) {
		int ret = uli_agp_init(pdev);
		if (ret) {
			agp_put_bridge(bridge);
			return ret;
		}
	}

	pci_set_drvdata(pdev, bridge);
	err = agp_add_bridge(bridge);
	if (err < 0) {
		/* Don't leak the bridge if registration fails. */
		agp_put_bridge(bridge);
		return err;
	}

	agp_bridges_found++;
	return 0;
}

static void __devexit agp_amd64_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	release_mem_region(virt_to_phys(bridge->gatt_table_real),
			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);

	agp_bridges_found--;
}

#ifdef CONFIG_PM

static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int agp_amd64_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
		nforce3_agp_init(pdev);

	return amd_8151_configure();
}

#endif /* CONFIG_PM */

static struct pci_device_id agp_amd64_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_8151_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* ULi M1689 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AL,
	.device		= PCI_DEVICE_ID_AL_M1689,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T800Pro */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_K8T800PRO_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA VT8385 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_8385_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA VT8380 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_8380_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA VT3336 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_VT3336,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA VT3238 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_3238_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA VT838x */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_838X_1,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* NVIDIA nForce3 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_NVIDIA,
	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* NVIDIA nForce3S */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_NVIDIA,
	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3S,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* SiS 755 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SI,
	.device		= PCI_DEVICE_ID_SI_755,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* SiS 760 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SI,
	.device		= PCI_DEVICE_ID_SI_760,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* ULi M1695 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AL,
	.device		= 0x1695,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},

	{ }
};

MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);

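/*
 * Catch-all table used when agp_try_unsupported is set: it matches any PCI
 * device, so driver_attach() re-runs the probe routine, which then accepts
 * the first device that exposes an AGP capability.
 */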
static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
	{ PCI_DEVICE_CLASS(0, 0) },
	{ }
};

static struct pci_driver agp_amd64_pci_driver = {
	.name		= "agpgart-amd64",
	.id_table	= agp_amd64_pci_table,
	.probe		= agp_amd64_probe,
	.remove		= agp_amd64_remove,
#ifdef CONFIG_PM
	.suspend	= agp_amd64_suspend,
	.resume		= agp_amd64_resume,
#endif
};

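/* Not static: the GART IOMMU code calls this early during boot. */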
int __init agp_amd64_init(void)
{
	int err = 0;

	if (agp_off)
		return -EINVAL;

	err = pci_register_driver(&agp_amd64_pci_driver);
	if (err < 0)
		return err;

	if (agp_bridges_found == 0) {
		if (!agp_try_unsupported && !agp_try_unsupported_boot) {
			printk(KERN_INFO PFX "No supported AGP bridge found.\n");
#ifdef MODULE
			printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
#else
			printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
#endif
			pci_unregister_driver(&agp_amd64_pci_driver);
			return -ENODEV;
		}

		/* First make sure there is at least one AMD64 northbridge. */
		if (!pci_dev_present(amd_nb_misc_ids)) {
			pci_unregister_driver(&agp_amd64_pci_driver);
			return -ENODEV;
		}

		/* Retry with the promiscuous table: look for any AGP bridge. */
		agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
		err = driver_attach(&agp_amd64_pci_driver.driver);
		if (err == 0 && agp_bridges_found == 0) {
			pci_unregister_driver(&agp_amd64_pci_driver);
			err = -ENODEV;
		}
	}
	return err;
}

static int __init agp_amd64_mod_init(void)
{
#ifndef MODULE
	/* The GART IOMMU code may already have called agp_amd64_init(). */
	if (gart_iommu_aperture)
		return agp_bridges_found ? 0 : -ENODEV;
#endif
	return agp_amd64_init();
}

static void __exit agp_amd64_cleanup(void)
{
#ifndef MODULE
	/* When the GART IOMMU owns the aperture, leave everything in place. */
	if (gart_iommu_aperture)
		return;
#endif
	if (aperture_resource)
		release_resource(aperture_resource);
	pci_unregister_driver(&agp_amd64_pci_driver);
}

module_init(agp_amd64_mod_init);
module_exit(agp_amd64_cleanup);

MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
MODULE_LICENSE("GPL");