#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <asm/pci-direct.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
#include <asm/iommu.h>
#include <asm/gart.h>
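
/*
 * definitions for the ACPI scanning code
 */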
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000
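
/*
 * ACPI table definitions
 *
 * These data structures are laid over the IVRS ACPI table to parse the
 * important values out of it.
 */

/* structure describing one IOMMU entry (IVHD) in the ACPI table */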
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 reserved;
} __attribute__((packed));
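
/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */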
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
} __attribute__((packed));
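
/*
 * An AMD IOMMU memory definition structure (IVMD). It defines things like
 * exclusion ranges for devices and regions that should be unity mapped.
 */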
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;

static int __initdata amd_iommu_detected;

u16 amd_iommu_last_bdf;         /* largest PCI device id we have to handle */

LIST_HEAD(amd_iommu_unity_map); /* required unity mappings we find in ACPI */

#ifdef CONFIG_IOMMU_STRESS
bool amd_iommu_isolate = false;
#else
bool amd_iommu_isolate = true;  /* if true, device isolation is enabled */
#endif

bool amd_iommu_unmap_flush;     /* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);      /* list of all AMD IOMMUs in the system */
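
/*
 * Pointer to the device table which is shared by all AMD IOMMUs. It is
 * indexed by the PCI device id or the HT unit id and contains the data
 * the IOMMU uses for each device.
 */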
struct dev_table_entry *amd_iommu_dev_table;
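
/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */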
u16 *amd_iommu_alias_table;
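
/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */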
struct amd_iommu **amd_iommu_rlookup_table;
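
/*
 * The pd table (protection domain table) is used to find the protection
 * domain data structure associated with a device. It is indexed by the
 * PCI device id as well.
 */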
struct protection_domain **amd_iommu_pd_table;
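
/*
 * The AMD IOMMU supports up to 2^16 different protection domains. This is a
 * bitmap used to find out which domain ids are already in use.
 */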
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */

/* Keeps track of the highest PCI device id found in the ACPI tables */
static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}

/*
 * Returns the size of a per-device table rounded up to whole pages,
 * e.g. 0x10000 device ids with a 4 byte entry size yields 256kb.
 */
static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

        return 1UL << shift;
}
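
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */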
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                    &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                    &entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
        printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
               dev_name(&iommu->dev->dev), iommu->cap_ptr);

        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        /* Disable event logging and event interrupts */
        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

        /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}
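
/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU
 * in the system has one.
 */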
static u8 * __init iommu_map_mmio_space(u64 address)
{
        u8 *ret;

        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
                return NULL;

        ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
        if (ret != NULL)
                return ret;

        release_mem_region(address, MMIO_REGION_LENGTH);

        return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}
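
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id we
 * need to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry. The size
 * (4, 8, 16 or 32 bytes) is encoded in the two highest bits of the
 * entry type byte.
 */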
static inline int ivhd_entry_length(u8 *ivhd)
{
        return 0x04 << (*ivhd >> 6);
}

/*
 * This function reads the last device id the IOMMU has to handle from the
 * PCI capability header for this IOMMU.
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
        u32 cap;

        cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
        update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

        return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI
 * table.
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        p += sizeof(*h);
        end += h->length;

        find_last_devid_on_pci(PCI_BUS(h->devid),
                               PCI_SLOT(h->devid),
                               PCI_FUNC(h->devid),
                               h->cap_ptr);

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        WARN_ON(p != end);

        return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest
 * device id which we need to handle. This is the first pass over the
 * ACPI table.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        /*
         * Validate the checksum here so we don't need to do it when
         * we actually parse the table
         */
        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0)
                /* ACPI table corrupt */
                return -ENODEV;

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (h->type) {
                case ACPI_IVHD_TYPE:
                        find_last_devid_from_ivhd(h);
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}
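
/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI
 * table a second time. In this pass we fetch all the information required
 * for initializing the IOMMUs in the system. It has to be done in a second
 * pass because we need to know the size of the shared data structures
 * before we can allocate them.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */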
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
        u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                             get_order(CMD_BUFFER_SIZE));

        if (cmd_buf == NULL)
                return NULL;

        iommu->cmd_buf_size = CMD_BUFFER_SIZE;

        return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it: head and tail pointers are zeroed while the command
 * buffer is disabled.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->cmd_buf == NULL);

        entry = (u64)virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        amd_iommu_reset_cmd_buffer(iommu);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf,
                   get_order(iommu->cmd_buf_size));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
        iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                get_order(EVT_BUFFER_SIZE));

        if (iommu->evt_buf == NULL)
                return NULL;

        iommu->evt_buf_size = EVT_BUFFER_SIZE;

        return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->evt_buf == NULL);

        entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
}

/*
 * Erratum 63 workaround: the IW bit is set in the device table entry
 * whenever the combined SYSMGT field reads 0b01.
 */
void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * tables and sets up the device table entry with that information.
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it.
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                /*
                 * We only can configure exclusion ranges per IOMMU, not
                 * per device. But we can enable the exclusion range per
                 * device. This is done here.
                 */
                set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}
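
/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries.
 */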
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc;

        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
                              &range);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
                              &misc);

        iommu->first_device = calc_devid(MMIO_GET_BUS(range),
                                         MMIO_GET_FD(range));
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
        iommu->evt_msi_num = MMIO_MSI_NUM(misc);
}
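
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */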
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                        struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
        u32 ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;

        /*
         * First set the recommended feature enable bits from ACPI
         * into the IOMMU control registers
         */
        h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        h->flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

        /*
         * Done. Now parse the device entries
         */
        p += sizeof(struct ivhd_header);
        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:

                        DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
                                    " last device %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(iommu->first_device),
                                    PCI_SLOT(iommu->first_device),
                                    PCI_FUNC(iommu->first_device),
                                    PCI_BUS(iommu->last_device),
                                    PCI_SLOT(iommu->last_device),
                                    PCI_FUNC(iommu->last_device),
                                    e->flags);

                        for (dev_i = iommu->first_device;
                                        dev_i <= iommu->last_device; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:

                        DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:

                        DUMP_printk("  DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:

                        DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:

                        DUMP_printk("  DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:

                        DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:

                        DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:

                        DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                default:
                        break;
                }

                p += ivhd_entry_length(p);
        }
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
        u16 i;

        for (i = iommu->first_device; i <= iommu->last_device; ++i)
                set_iommu_for_device(iommu, i);

        return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_command_buffer(iommu);
        free_event_buffer(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        for_each_iommu_safe(iommu, next) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}
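
/*
 * This function glues the initialization of one IOMMU together: it maps the
 * MMIO space, allocates the command and event buffers, and parses the PCI
 * and ACPI information. It does NOT enable the IOMMU, that happens later
 * via enable_iommus().
 */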
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        spin_lock_init(&iommu->lock);
        list_add_tail(&iommu->list, &amd_iommu_list);

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
        if (!iommu->dev)
                return 1;

        iommu->cap_ptr = h->cap_ptr;
        iommu->pci_seg = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;
        iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
        if (!iommu->mmio_base)
                return -ENOMEM;

        iommu->cmd_buf = alloc_command_buffer(iommu);
        if (!iommu->cmd_buf)
                return -ENOMEM;

        iommu->evt_buf = alloc_event_buffer(iommu);
        if (!iommu->evt_buf)
                return -ENOMEM;

        iommu->int_enabled = false;

        init_iommu_from_pci(iommu);
        init_iommu_from_acpi(iommu, h);
        init_iommu_devices(iommu);

        return pci_enable_device(iommu->dev);
}
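
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */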
static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (*p) {
                case ACPI_IVHD_TYPE:

                        DUMP_printk("device: %02x:%02x.%01x cap: %04x "
                                    "seg: %d flags: %01x info %04x\n",
                                    PCI_BUS(h->devid), PCI_SLOT(h->devid),
                                    PCI_FUNC(h->devid), h->cap_ptr,
                                    h->pci_seg, h->flags, h->info);
                        DUMP_printk("       mmio-addr: %016llx\n",
                                    h->mmio_phys);

                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL)
                                return -ENOMEM;
                        ret = init_iommu_one(iommu, h);
                        if (ret)
                                return ret;
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}
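
/****************************************************************************
 *
 * The following functions initialize the MSI interrupt for one IOMMU. MSI
 * setup happens at most once per pci_dev; iommu_init_msi() returns early
 * when interrupts are already enabled for the device.
 *
 ****************************************************************************/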
static int __init iommu_setup_msi(struct amd_iommu *iommu)
{
        int r;

        if (pci_enable_msi(iommu->dev))
                return 1;

        r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
                        IRQF_SAMPLE_RANDOM,
                        "AMD-Vi",
                        NULL);

        if (r) {
                pci_disable_msi(iommu->dev);
                return 1;
        }

        iommu->int_enabled = true;
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

        return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
        if (iommu->int_enabled)
                return 0;

        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
                return iommu_setup_msi(iommu);

        return 1;
}
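
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/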
static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
        int i;

        switch (m->type) {
        case ACPI_IVMD_TYPE:
                set_device_exclusion_range(m->devid, m);
                break;
        case ACPI_IVMD_TYPE_ALL:
                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                        set_device_exclusion_range(i, m);
                break;
        case ACPI_IVMD_TYPE_RANGE:
                for (i = m->devid; i <= m->aux; ++i)
                        set_device_exclusion_range(i, m);
                break;
        default:
                break;
        }

        return 0;
}

/* called for unity map ACPI definitions */
static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e = NULL;
        char *s;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        switch (m->type) {
        default:
                kfree(e);
                return 0;
        case ACPI_IVMD_TYPE:
                s = "IVMD_TYPE\t\t\t";
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                s = "IVMD_TYPE_ALL\t\t";
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                s = "IVMD_TYPE_RANGE\t\t";
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
        e->prot = m->flags >> 1;

        DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
                    " range_start: %016llx range_end: %016llx flags: %x\n", s,
                    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
                    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
                    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
                    e->address_start, e->address_end, m->flags);

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & IVMD_FLAG_EXCL_RANGE)
                        init_exclusion_range(m);
                else if (m->flags & IVMD_FLAG_UNITY_MAP)
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}
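
/*
 * Init the device table. Entries are marked valid with translation enabled,
 * which blocks DMA for a device until it is attached to a domain.
 */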
static void init_device_table(void)
{
        u16 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
        }
}
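
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 */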
static void enable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                iommu_disable(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
                iommu_set_exclusion_range(iommu);
                iommu_init_msi(iommu);
                iommu_enable(iommu);
        }
}

static void disable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_disable(iommu);
}

/*
 * Suspend/Resume support for the IOMMU hardware state
 */

static int amd_iommu_resume(struct sys_device *dev)
{
        /* re-enable and thereby reprogram the hardware */
        enable_iommus();

        /*
         * we have to flush after the IOMMUs are enabled because a
         * disabled IOMMU will never execute the commands we send
         */
        amd_iommu_flush_all_devices();
        amd_iommu_flush_all_domains();

        return 0;
}

static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
{
        /* disable IOMMUs to go out of the way for BIOS */
        disable_iommus();

        return 0;
}

static struct sysdev_class amd_iommu_sysdev_class = {
        .name = "amd_iommu",
        .suspend = amd_iommu_suspend,
        .resume = amd_iommu_resume,
};

static struct sys_device device_amd_iommu = {
        .id = 0,
        .cls = &amd_iommu_sysdev_class,
};
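
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * It is called from the generic x86 DMA layer initialization code and
 * parses the ACPI IVRS table three times:
 *
 *      1 pass) Find the highest PCI device id the driver has to handle.
 *              Upon this information the size of the shared data
 *              structures is determined.
 *
 *      2 pass) Initialize the data structures just allocated with the
 *              information in the ACPI table about available AMD IOMMUs
 *              in the system. It also maps the PCI devices in the
 *              system to specific IOMMUs.
 *
 *      3 pass) After the basic data structures are allocated and
 *              initialized we update them with information about memory
 *              remapping requirements parsed out of the ACPI table in
 *              this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hooks are registered in the dma_ops interface.
 */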
int __init amd_iommu_init(void)
{
        int i, ret = 0;

        if (no_iommu) {
                printk(KERN_INFO "AMD-Vi disabled by kernel command line\n");
                return 0;
        }

        if (!amd_iommu_detected)
                return -ENODEV;

        /*
         * First parse ACPI tables to find the largest Bus/Dev/Func
         * we need to handle. Upon this information the shared data
         * structures for the IOMMUs in the system will be allocated.
         */
        if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
                return -ENODEV;

        dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
        alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

        ret = -ENOMEM;

        /* Device table - directly used by all IOMMUs */
        amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                      get_order(dev_table_size));
        if (amd_iommu_dev_table == NULL)
                goto out;

        /*
         * Alias table - maps PCI Bus/Dev/Func to the Bus/Dev/Func the
         * IOMMU sees for that device
         */
        amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
                        get_order(alias_table_size));
        if (amd_iommu_alias_table == NULL)
                goto free;

        /* IOMMU rlookup table - find the IOMMU for a specific device */
        amd_iommu_rlookup_table = (void *)__get_free_pages(
                        GFP_KERNEL | __GFP_ZERO,
                        get_order(rlookup_table_size));
        if (amd_iommu_rlookup_table == NULL)
                goto free;

        /*
         * Protection Domain table - maps devices to protection domains.
         * This table has the same size as the rlookup_table.
         */
        amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                     get_order(rlookup_table_size));
        if (amd_iommu_pd_table == NULL)
                goto free;

        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
                                            GFP_KERNEL | __GFP_ZERO,
                                            get_order(MAX_DOMAIN_ID/8));
        if (amd_iommu_pd_alloc_bitmap == NULL)
                goto free;

        /* init the device table */
        init_device_table();

        /*
         * let all alias entries point to themselves
         */
        for (i = 0; i <= amd_iommu_last_bdf; ++i)
                amd_iommu_alias_table[i] = i;

        /*
         * never allocate domain 0 because it is used as the non-allocated
         * and error value placeholder
         */
        amd_iommu_pd_alloc_bitmap[0] = 1;

        /*
         * now the data structures are allocated and basically initialized,
         * start the real ACPI table scan
         */
        ret = -ENODEV;
        if (acpi_table_parse("IVRS", init_iommu_all) != 0)
                goto free;

        if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
                goto free;

        ret = sysdev_class_register(&amd_iommu_sysdev_class);
        if (ret)
                goto free;

        ret = sysdev_register(&device_amd_iommu);
        if (ret)
                goto free;

        if (iommu_pass_through)
                ret = amd_iommu_init_passthrough();
        else
                ret = amd_iommu_init_dma_ops();
        if (ret)
                goto free;

        enable_iommus();

        if (iommu_pass_through)
                goto out;

        printk(KERN_INFO "AMD-Vi: device isolation ");
        if (amd_iommu_isolate)
                printk("enabled\n");
        else
                printk("disabled\n");

        if (amd_iommu_unmap_flush)
                printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
        else
                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

out:
        return ret;

free:
        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                   get_order(MAX_DOMAIN_ID/8));

        free_pages((unsigned long)amd_iommu_pd_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_rlookup_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_alias_table,
                   get_order(alias_table_size));

        free_pages((unsigned long)amd_iommu_dev_table,
                   get_order(dev_table_size));

        free_iommu_all();

        free_unity_maps();

        goto out;
}

void amd_iommu_shutdown(void)
{
        disable_iommus();
}
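
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs.
 *
 ****************************************************************************/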
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
        return 0;
}

void __init amd_iommu_detect(void)
{
        if (swiotlb || no_iommu || (iommu_detected && !gart_iommu_aperture))
                return;

        if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
                iommu_detected = 1;
                amd_iommu_detected = 1;
#ifdef CONFIG_GART_IOMMU
                gart_iommu_aperture_disabled = 1;
                gart_iommu_aperture = 0;
#endif
        }
}
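
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/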
static int __init parse_amd_iommu_dump(char *str)
{
        amd_iommu_dump = true;

        return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "isolate", 7) == 0)
                        amd_iommu_isolate = true;
                if (strncmp(str, "share", 5) == 0)
                        amd_iommu_isolate = false;
                if (strncmp(str, "fullflush", 9) == 0)
                        amd_iommu_unmap_flush = true;
        }

        return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);