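/*
 * AMD IOMMU (AMD-Vi) hardware initialization code
 *
 * Parses the ACPI IVRS table, allocates the shared data structures
 * (device table, alias table, rlookup table) and brings up the IOMMU
 * hardware found in the system.
 */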
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sysdev.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <asm/pci-direct.h>
#include <asm/amd_iommu_proto.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
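
/*
 * definitions for the ACPI scanning code
 */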
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000
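
/*
 * ACPI table definitions
 *
 * These data structures are laid over the ACPI table to parse the important
 * values out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed
 * by one or more ivhd_entrys.
 */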
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 reserved;
} __attribute__((packed));
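
/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */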
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));
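
/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */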
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;	/* if true, dump IVRS table contents to the log */

static int __initdata amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;

/*
 * The ACPI table parsing functions set this variable on an error
 */
static int __initdata amd_iommu_init_err;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs. It is
 * indexed by the PCI device id and contains information about the domain
 * the device belongs to as well as the page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * used to keep track of which ones are in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

/*
 * Calculates the size of a table which holds one entry per possible
 * device id, rounded up to a power-of-two number of pages.
 */
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
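
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */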
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
	       dev_name(&iommu->dev->dev), iommu->cap_ptr);

	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}
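
/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */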
static u8 * __init iommu_map_mmio_space(u64 address)
{
	u8 *ret;

	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
		       address);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
	if (ret != NULL)
		return ret;

	release_mem_region(address, MMIO_REGION_LENGTH);

	return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}
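
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */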
static inline int ivhd_entry_length(u8 *ivhd)
{
	return 0x04 << (*ivhd >> 6);
}

/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr + MMIO_RANGE_OFFSET);
	update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS(h->devid),
			       PCI_SLOT(h->devid),
			       PCI_FUNC(h->devid),
			       h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it on
	 * each IVHD header we parse later
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		amd_iommu_init_err = -ENODEV;
		return 0;
	}

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}
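
/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this pass we initialize the hardware itself and the
 * shared data structures which the driver needs.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */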
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					     get_order(CMD_BUFFER_SIZE));

	if (cmd_buf == NULL)
		return NULL;

	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

	return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf,
		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(EVT_BUFFER_SIZE));

	if (iommu->evt_buf == NULL)
		return NULL;

	iommu->evt_buf_size = EVT_BUFFER_SIZE;

	return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 5) & 0x07;
	int _bit = bit & 0x1f;

	amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 5) & 0x07;
	int _bit = bit & 0x1f;

	return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
}

/*
 * Apply erratum 63: a device which has only the SYSMGT1 message type
 * enabled additionally needs the IW (write permission) bit set in its
 * device table entry.
 */
void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We only can configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here.
		 */
		set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}

/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device ids this IOMMU handles.
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc;
	int i, j;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
					 MMIO_GET_FD(range));
	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
					MMIO_GET_LD(range));
	iommu->evt_msi_num = MMIO_MSI_NUM(misc);

	if (!is_rd890_iommu(iommu->dev))
		return;

	/*
	 * Some rd890 systems may not be fully reconfigured by the BIOS, so
	 * it's necessary for us to store this information so it can be
	 * reprogrammed on resume
	 */
	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
			      &iommu->stored_addr_lo);
	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
			      &iommu->stored_addr_hi);

	/* Low bit locks writes to configuration space */
	iommu->stored_addr_lo &= ~1;

	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

	for (i = 0; i < 0x83; i++)
		iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
	u32 ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u16 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}
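
/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */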
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
	if (!iommu->dev)
		return 1;

	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;
	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	iommu->evt_buf = alloc_event_buffer(iommu);
	if (!iommu->evt_buf)
		return -ENOMEM;

	iommu->int_enabled = false;

	init_iommu_from_pci(iommu);
	init_iommu_from_acpi(iommu, h);
	init_iommu_devices(iommu);

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	return pci_enable_device(iommu->dev);
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (*p) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL) {
				amd_iommu_init_err = -ENOMEM;
				return 0;
			}

			ret = init_iommu_one(iommu, h);
			if (ret) {
				amd_iommu_init_err = ret;
				return 0;
			}
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}
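
/****************************************************************************
 *
 * The following functions initialize the MSI interrupt for the event
 * logging of each IOMMU in the system.
 *
 ****************************************************************************/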

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	if (pci_enable_msi(iommu->dev))
		return 1;

	r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
			IRQF_SAMPLE_RANDOM,
			"AMD-Vi",
			NULL);

	if (r) {
		pci_disable_msi(iommu->dev);
		return 1;
	}

	iommu->int_enabled = true;
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	if (iommu->int_enabled)
		return 0;

	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
		return iommu_setup_msi(iommu);

	return 1;
}
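
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/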

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table(void)
{
	u16 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

/* Program the feature enable bits recommended by ACPI into the hardware */
static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = NULL;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev))
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */
	pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));

	if (!pdev)
		return;

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	pci_dev_put(pdev);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_init_flags(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_init_msi(iommu);
		iommu_enable(iommu);
	}
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}

/*
 * Suspend/Resume support, hooked up through the sysdev class below.
 */
static int amd_iommu_resume(struct sys_device *dev)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	/*
	 * we have to flush after the IOMMUs are enabled because a
	 * disabled IOMMU will never execute the commands we send
	 */
	amd_iommu_flush_all_devices();
	amd_iommu_flush_all_domains();

	return 0;
}

static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct sysdev_class amd_iommu_sysdev_class = {
	.name = "amd_iommu",
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static struct sys_device device_amd_iommu = {
	.id = 0,
	.cls = &amd_iommu_sysdev_class,
};
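
/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the shared data
 *		structures is determined that needs to be allocated.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After that the hardware is initialized and ready to go. In the last
 * step we do some Linux specific things like registering the driver in
 * the dma_ops interface and initializing the suspend/resume support
 * functions.
 */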
static int __init amd_iommu_init(void)
{
	int i, ret = 0;

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
		return -ENODEV;

	ret = amd_iommu_init_err;
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	ret = -ENOMEM;

	/* Device table - directly used by all IOMMUs */
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						       get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
							 get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto free;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto free;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto free;

	/* init the device table */
	init_device_table();

	/*
	 * let all alias entries point to itself
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because its used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the actual acpi table scan
	 */
	ret = -ENODEV;
	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
		goto free;

	if (amd_iommu_init_err) {
		ret = amd_iommu_init_err;
		goto free;
	}

	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
		goto free;

	if (amd_iommu_init_err) {
		ret = amd_iommu_init_err;
		goto free;
	}

	ret = sysdev_class_register(&amd_iommu_sysdev_class);
	if (ret)
		goto free;

	ret = sysdev_register(&device_amd_iommu);
	if (ret)
		goto free;

	ret = amd_iommu_init_devices();
	if (ret)
		goto free;

	enable_iommus();

	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
		ret = amd_iommu_init_dma_ops();

	if (ret)
		goto free_disable;

	amd_iommu_init_api();

	amd_iommu_init_notifier();

	if (iommu_pass_through)
		goto out;

	if (amd_iommu_unmap_flush)
		printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
	else
		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

	x86_platform.iommu_shutdown = disable_iommus;
out:
	return ret;

free_disable:
	disable_iommus();

free:
	amd_iommu_uninit_devices();

	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

	free_unity_maps();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();

#endif

	goto out;
}
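
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/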
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
	return 0;
}

int __init amd_iommu_detect(void)
{
	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (amd_iommu_disabled)
		return -ENODEV;

	if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
		iommu_detected = 1;
		amd_iommu_detected = 1;
		x86_init.iommu.iommu_init = amd_iommu_init;

		/* Make sure ACS will be enabled */
		pci_request_acs();
		return 1;
	}
	return -ENODEV;
}
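
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/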

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
	}

	return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  0,
		  0);
1555