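/*
 * AMD IOMMU (AMD-Vi) detection and initialization code. This file parses
 * the ACPI IVRS table and sets up the driver's per-IOMMU data structures.
 */
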
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
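
/*
 * definitions for the ACPI scanning code
 */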
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL                0x48

#define IVHD_SPECIAL_IOAPIC             1
#define IVHD_SPECIAL_HPET               2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000
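
/*
 * ACPI table definitions
 *
 * These data structures are laid over the ACPI table to parse the important
 * values out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */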
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 reserved;
} __attribute__((packed));
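
/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */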
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
} __attribute__((packed));
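
/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */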
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

/* largest PCI device id we have to handle */
u16 amd_iommu_last_bdf;

/* a list of required unity mappings we find in ACPI */
LIST_HEAD(amd_iommu_unity_map);

/* if true, flush on every unmap */
u32 amd_iommu_unmap_flush;

/* list of all AMD IOMMUs in the system */
LIST_HEAD(amd_iommu_list);

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasids __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs. It is
 * indexed by the PCI device id or the HT unit id and contains information
 * about the domain the device belongs to as well as the page table root
 * pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */

enum iommu_init_state {
        IOMMU_START_STATE,
        IOMMU_IVRS_DETECTED,
        IOMMU_ACPI_FINISHED,
        IOMMU_ENABLED,
        IOMMU_PCI_INIT,
        IOMMU_INTERRUPTS_EN,
        IOMMU_DMA_OPS,
        IOMMU_INITIALIZED,
        IOMMU_NOT_FOUND,
        IOMMU_INIT_ERROR,
};

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);

static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

        return 1UL << shift;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
        pci_read_config_dword(iommu->dev, 0xfc, &val);
        return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
        pci_write_config_dword(iommu->dev, 0xfc, val);
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf0, address);
        pci_read_config_dword(iommu->dev, 0xf4, &val);
        return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
        pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                    &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                    &entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~CTRL_INV_TO_MASK;
        ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        /* Disable event logging and event interrupts */
        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

        /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address)
{
        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
                pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
                       address);
                pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
                return NULL;
        }

        return (u8 __iomem *)ioremap_nocache(address, MMIO_REGION_LENGTH);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}
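
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we find out the highest device id this code has to
 * handle. Upon this information the size of the shared data structures is
 * determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */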
static inline int ivhd_entry_length(u8 *ivhd)
{
        return 0x04 << (*ivhd >> 6);
}

/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
        u32 cap;

        cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
        update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

        return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        p += sizeof(*h);
        end += h->length;

        find_last_devid_on_pci(PCI_BUS(h->devid),
                               PCI_SLOT(h->devid),
                               PCI_FUNC(h->devid),
                               h->cap_ptr);

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        WARN_ON(p != end);

        return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        /*
         * Validate checksum here so we don't need to do it when
         * we actually parse the table
         */
        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0)
                /* ACPI table corrupt */
                return -ENODEV;

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (h->type) {
                case ACPI_IVHD_TYPE:
                        find_last_devid_from_ivhd(h);
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}
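
/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this pass we initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */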
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
        u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                             get_order(CMD_BUFFER_SIZE));

        if (cmd_buf == NULL)
                return NULL;

        iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

        return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->cmd_buf == NULL);

        entry = (u64)virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        amd_iommu_reset_cmd_buffer(iommu);
        iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf,
                   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
        iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                get_order(EVT_BUFFER_SIZE));

        if (iommu->evt_buf == NULL)
                return NULL;

        iommu->evt_buf_size = EVT_BUFFER_SIZE;

        return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->evt_buf == NULL);

        entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its PPR requests to */
static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
{
        iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                get_order(PPR_LOG_SIZE));

        if (iommu->ppr_log == NULL)
                return NULL;

        return iommu->ppr_log;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
        u64 entry;

        if (iommu->ppr_log == NULL)
                return;

        entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
        iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
        if (iommu->ppr_log == NULL)
                return;

        free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
        if (!iommu_feature(iommu, FEATURE_GT))
                return;

        iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}

/*
 * Erratum 63 workaround: if the SysMgt field of a device table entry
 * reads 01b, the IW bit must also be set in that entry.
 */
void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}

static int add_special_device(u8 type, u8 id, u16 devid)
{
        struct devid_map *entry;
        struct list_head *list;

        if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
                return -EINVAL;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->id = id;
        entry->devid = devid;

        if (type == IVHD_SPECIAL_IOAPIC)
                list = &ioapic_map;
        else
                list = &hpet_map;

        list_add_tail(&entry->list, list);

        return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                /*
                 * We only can configure exclusion ranges per IOMMU, not
                 * per device. But we can enable the exclusion range per
                 * device. This is done here
                 */
                set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                       struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 devid = 0, devid_start = 0, devid_to = 0;
        u32 dev_i, ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;

        /*
         * First save the recommended feature enable bits from ACPI
         */
        iommu->acpi_flags = h->flags;

        /*
         * Done. Now parse the device entries
         */
        p += sizeof(struct ivhd_header);
        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:

                        DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
                                    " last device %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(iommu->first_device),
                                    PCI_SLOT(iommu->first_device),
                                    PCI_FUNC(iommu->first_device),
                                    PCI_BUS(iommu->last_device),
                                    PCI_SLOT(iommu->last_device),
                                    PCI_FUNC(iommu->last_device),
                                    e->flags);

                        for (dev_i = iommu->first_device;
                             dev_i <= iommu->last_device; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:

                        DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:

                        DUMP_printk(" DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:

                        DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:

                        DUMP_printk(" DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:

                        DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:

                        DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:

                        DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                case IVHD_DEV_SPECIAL: {
                        u8 handle, type;
                        const char *var;
                        u16 devid;
                        int ret;

                        handle = e->ext & 0xff;
                        devid = (e->ext >> 8) & 0xffff;
                        type = (e->ext >> 24) & 0xff;

                        if (type == IVHD_SPECIAL_IOAPIC)
                                var = "IOAPIC";
                        else if (type == IVHD_SPECIAL_HPET)
                                var = "HPET";
                        else
                                var = "UNKNOWN";

                        DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
                                    var, (int)handle,
                                    PCI_BUS(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));

                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        ret = add_special_device(type, handle, devid);
                        if (ret)
                                return ret;
                        break;
                }
                default:
                        break;
                }

                p += ivhd_entry_length(p);
        }

        return 0;
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
        u32 i;

        for (i = iommu->first_device; i <= iommu->last_device; ++i)
                set_iommu_for_device(iommu, i);

        return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_command_buffer(iommu);
        free_event_buffer(iommu);
        free_ppr_log(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        for_each_iommu_safe(iommu, next) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
        u32 value;

        if ((boot_cpu_data.x86 != 0x15) ||
            (boot_cpu_data.x86_model < 0x10) ||
            (boot_cpu_data.x86_model > 0x1f))
                return;

        pci_write_config_dword(iommu->dev, 0xf0, 0x90);
        pci_read_config_dword(iommu->dev, 0xf4, &value);

        if (value & BIT(2))
                return;

        /* Select NB indirect register 0x90 and enable writing */
        pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

        pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
        pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
                dev_name(&iommu->dev->dev));

        /* Clear the enable writing bit */
        pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        int ret;

        spin_lock_init(&iommu->lock);

        /* Add IOMMU to internal data structures */
        list_add_tail(&iommu->list, &amd_iommu_list);
        iommu->index = amd_iommus_present++;

        if (unlikely(iommu->index >= MAX_IOMMUS)) {
                WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
                return -ENOSYS;
        }

        /* Index is fine - add IOMMU to the array */
        amd_iommus[iommu->index] = iommu;

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->devid = h->devid;
        iommu->cap_ptr = h->cap_ptr;
        iommu->pci_seg = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;
        iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
        if (!iommu->mmio_base)
                return -ENOMEM;

        iommu->cmd_buf = alloc_command_buffer(iommu);
        if (!iommu->cmd_buf)
                return -ENOMEM;

        iommu->evt_buf = alloc_event_buffer(iommu);
        if (!iommu->evt_buf)
                return -ENOMEM;

        iommu->int_enabled = false;

        ret = init_iommu_from_acpi(iommu, h);
        if (ret)
                return ret;

        /*
         * Make sure IOMMU is not considered to translate itself. The IVRS
         * table tells us so, but this is a lie!
         */
        amd_iommu_rlookup_table[iommu->devid] = NULL;

        init_iommu_devices(iommu);

        return 0;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (*p) {
                case ACPI_IVHD_TYPE:

                        DUMP_printk("device: %02x:%02x.%01x cap: %04x "
                                    "seg: %d flags: %01x info %04x\n",
                                    PCI_BUS(h->devid), PCI_SLOT(h->devid),
                                    PCI_FUNC(h->devid), h->cap_ptr,
                                    h->pci_seg, h->flags, h->info);
                        DUMP_printk(" mmio-addr: %016llx\n",
                                    h->mmio_phys);

                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL)
                                return -ENOMEM;

                        ret = init_iommu_one(iommu, h);
                        if (ret)
                                return ret;
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

static int iommu_init_pci(struct amd_iommu *iommu)
{
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc, low, high;

        iommu->dev = pci_get_bus_and_slot(PCI_BUS(iommu->devid),
                                          iommu->devid & 0xff);
        if (!iommu->dev)
                return -ENODEV;

        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
                              &range);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
                              &misc);

        iommu->first_device = calc_devid(MMIO_GET_BUS(range),
                                         MMIO_GET_FD(range));
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));

        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
                amd_iommu_iotlb_sup = false;

        /* read extended feature bits */
        low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
        high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

        iommu->features = ((u64)high << 32) | low;

        if (iommu_feature(iommu, FEATURE_GT)) {
                int glxval;
                u32 pasids;
                u64 shift;

                shift = iommu->features & FEATURE_PASID_MASK;
                shift >>= FEATURE_PASID_SHIFT;
                pasids = (1 << shift);

                amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);

                glxval = iommu->features & FEATURE_GLXVAL_MASK;
                glxval >>= FEATURE_GLXVAL_SHIFT;

                if (amd_iommu_max_glx_val == -1)
                        amd_iommu_max_glx_val = glxval;
                else
                        amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
        }

        if (iommu_feature(iommu, FEATURE_GT) &&
            iommu_feature(iommu, FEATURE_PPR)) {
                iommu->is_iommu_v2 = true;
                amd_iommu_v2_present = true;
        }

        if (iommu_feature(iommu, FEATURE_PPR)) {
                iommu->ppr_log = alloc_ppr_log(iommu);
                if (!iommu->ppr_log)
                        return -ENOMEM;
        }

        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
                amd_iommu_np_cache = true;

        if (is_rd890_iommu(iommu->dev)) {
                int i, j;

                iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
                                                        PCI_DEVFN(0, 0));

                /*
                 * Some rd890 systems may not be fully reconfigured by the
                 * BIOS, so it's necessary for us to store this information so
                 * it can be reapplied on resume.
                 */
                pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
                                      &iommu->stored_addr_lo);
                pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
                                      &iommu->stored_addr_hi);

                /* Low bit locks writes to configuration space */
                iommu->stored_addr_lo &= ~1;

                for (i = 0; i < 6; i++)
                        for (j = 0; j < 0x12; j++)
                                iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

                for (i = 0; i < 0x83; i++)
                        iommu->stored_l2[i] = iommu_read_l2(iommu, i);
        }

        amd_iommu_erratum_746_workaround(iommu);

        return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
        static const char * const feat_str[] = {
                "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
                "IA", "GA", "HE", "PC"
        };
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                int i;

                pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
                        dev_name(&iommu->dev->dev), iommu->cap_ptr);

                if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
                        pr_info("AMD-Vi: Extended features: ");
                        for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
                                if (iommu_feature(iommu, (1ULL << i)))
                                        pr_cont(" %s", feat_str[i]);
                        }
                        pr_cont("\n");
                }
        }
        if (irq_remapping_enabled)
                pr_info("AMD-Vi: Interrupt remapping enabled\n");
}

static int __init amd_iommu_init_pci(void)
{
        struct amd_iommu *iommu;
        int ret = 0;

        for_each_iommu(iommu) {
                ret = iommu_init_pci(iommu);
                if (ret)
                        break;
        }

        ret = amd_iommu_init_devices();

        print_iommu_info();

        return ret;
}
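
/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/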
static int iommu_setup_msi(struct amd_iommu *iommu)
{
        int r;

        r = pci_enable_msi(iommu->dev);
        if (r)
                return r;

        r = request_threaded_irq(iommu->dev->irq,
                                 amd_iommu_int_handler,
                                 amd_iommu_int_thread,
                                 0, "AMD-Vi",
                                 iommu->dev);

        if (r) {
                pci_disable_msi(iommu->dev);
                return r;
        }

        iommu->int_enabled = true;

        return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
        int ret;

        if (iommu->int_enabled)
                goto enable_faults;

        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
                ret = iommu_setup_msi(iommu);
        else
                ret = -ENODEV;

        if (ret)
                return ret;

enable_faults:
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

        if (iommu->ppr_log != NULL)
                iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

        return 0;
}
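
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/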
static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
        int i;

        switch (m->type) {
        case ACPI_IVMD_TYPE:
                set_device_exclusion_range(m->devid, m);
                break;
        case ACPI_IVMD_TYPE_ALL:
                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                        set_device_exclusion_range(i, m);
                break;
        case ACPI_IVMD_TYPE_RANGE:
                for (i = m->devid; i <= m->aux; ++i)
                        set_device_exclusion_range(i, m);
                break;
        default:
                break;
        }

        return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e = NULL;
        char *s;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        switch (m->type) {
        default:
                kfree(e);
                return 0;
        case ACPI_IVMD_TYPE:
                s = "IVMD_TYPE\t\t\t";
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                s = "IVMD_TYPE_ALL\t\t";
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                s = "IVMD_TYPE_RANGE\t\t";
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
        e->prot = m->flags >> 1;

        DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
                    " range_start: %016llx range_end: %016llx flags: %x\n", s,
                    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
                    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
                    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
                    e->address_start, e->address_end, m->flags);

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & IVMD_FLAG_EXCL_RANGE)
                        init_exclusion_range(m);
                else if (m->flags & IVMD_FLAG_UNITY_MAP)
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table_dma(void)
{
        u32 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
        }
}

static void __init uninit_device_table_dma(void)
{
        u32 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                amd_iommu_dev_table[devid].data[0] = 0ULL;
                amd_iommu_dev_table[devid].data[1] = 0ULL;
        }
}

static void init_device_table(void)
{
        u32 devid;

        if (!amd_iommu_irq_remap)
                return;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
                set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
        iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

        /* Set IOTLB invalidation timeout to 1s */
        iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
        int i, j;
        u32 ioc_feature_control;
        struct pci_dev *pdev = iommu->root_pdev;

        /* RD890 BIOSes may not have completely reconfigured the iommu */
        if (!is_rd890_iommu(iommu->dev) || !pdev)
                return;

        /*
         * First, we need to ensure that the iommu is enabled. This is
         * controlled by a register in the northbridge
         */

        /* Select Northbridge indirect register 0x75 and enable writing */
        pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
        pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

        /* Enable the iommu */
        if (!(ioc_feature_control & 0x1))
                pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

        /* Restore the iommu BAR */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo);
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
                               iommu->stored_addr_hi);

        /* Restore the l1 indirect regs for each of the 6 l1s */
        for (i = 0; i < 6; i++)
                for (j = 0; j < 0x12; j++)
                        iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

        /* Restore the l2 indirect regs */
        for (i = 0; i < 0x83; i++)
                iommu_write_l2(iommu, i, iommu->stored_l2[i]);

        /* Lock PCI setup registers */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo | 1);
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void early_enable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                iommu_disable(iommu);
                iommu_init_flags(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
                iommu_set_exclusion_range(iommu);
                iommu_enable(iommu);
                iommu_flush_all_caches(iommu);
        }
}

static void enable_iommus_v2(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                iommu_enable_ppr_log(iommu);
                iommu_enable_gt(iommu);
        }
}

static void enable_iommus(void)
{
        early_enable_iommus();

        enable_iommus_v2();
}

static void disable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 */
static void amd_iommu_resume(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_apply_resume_quirks(iommu);

        /* re-load the hardware */
        enable_iommus();

        amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
        /* disable IOMMUs to go out of the way for BIOS */
        disable_iommus();

        return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
        .suspend = amd_iommu_suspend,
        .resume = amd_iommu_resume,
};

static void __init free_on_init_error(void)
{
        free_pages((unsigned long)irq_lookup_table,
                   get_order(rlookup_table_size));

        if (amd_iommu_irq_cache) {
                kmem_cache_destroy(amd_iommu_irq_cache);
                amd_iommu_irq_cache = NULL;
        }

        free_pages((unsigned long)amd_iommu_rlookup_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_alias_table,
                   get_order(alias_table_size));

        free_pages((unsigned long)amd_iommu_dev_table,
                   get_order(dev_table_size));

        free_iommu_all();

#ifdef CONFIG_GART_IOMMU
        /*
         * We failed to initialize the AMD IOMMU - try fallback to GART
         * if possible.
         */
        gart_iommu_init();

#endif
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID         ((0x00 << 8) | PCI_DEVFN(0x14, 0))

static bool __init check_ioapic_information(void)
{
        bool ret, has_sb_ioapic;
        int idx;

        has_sb_ioapic = false;
        ret = false;

        for (idx = 0; idx < nr_ioapics; idx++) {
                int devid, id = mpc_ioapic_id(idx);

                devid = get_ioapic_devid(id);
                if (devid < 0) {
                        pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
                        ret = false;
                } else if (devid == IOAPIC_SB_DEVID) {
                        has_sb_ioapic = true;
                        ret = true;
                }
        }

        if (!has_sb_ioapic) {
                /*
                 * We expect the SB IOAPIC to be listed in the IVRS
                 * table. The system timer is connected to the SB IOAPIC
                 * and if we don't have it in the list the system will
                 * panic at boot time.  This situation usually happens
                 * when the BIOS is buggy and provides us the wrong
                 * device id for the IOAPIC in the system.
                 */
                pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
        }

        if (!ret)
                pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");

        return ret;
}

static void __init free_dma_resources(void)
{
        amd_iommu_uninit_devices();

        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                   get_order(MAX_DOMAIN_ID/8));

        free_unity_maps();
}
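
/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for the AMD IOMMU (IVRS)
 * three times:
 *
 *      1 pass) Find the highest PCI device id the driver has to handle.
 *              Upon this information the size of the data structures is
 *              determined that need to be allocated.
 *
 *      2 pass) Initialize the data structures which are allocated with
 *              the size determined in the first pass.
 *
 *      3 pass) After the basic data structures are allocated and
 *              initialized we update them with information from ACPI.
 */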
static int __init early_amd_iommu_init(void)
{
        struct acpi_table_header *ivrs_base;
        acpi_size ivrs_size;
        acpi_status status;
        int i, ret = 0;

        if (!amd_iommu_detected)
                return -ENODEV;

        status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
        if (status == AE_NOT_FOUND)
                return -ENODEV;
        else if (ACPI_FAILURE(status)) {
                const char *err = acpi_format_exception(status);
                pr_err("AMD-Vi: IVRS table error: %s\n", err);
                return -EINVAL;
        }

        /*
         * First parse ACPI tables to find the largest Bus/Dev/Func
         * we need to handle. Upon this information the shared data
         * structures for the IOMMUs in the system will be allocated
         */
        ret = find_last_devid_acpi(ivrs_base);
        if (ret)
                goto out;

        dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
        alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

        /* Device table - directly used by all IOMMUs */
        ret = -ENOMEM;
        amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                       get_order(dev_table_size));
        if (amd_iommu_dev_table == NULL)
                goto out;

        /*
         * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
         * IOMMU sees for that device
         */
        amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
                                                         get_order(alias_table_size));
        if (amd_iommu_alias_table == NULL)
                goto out;

        /* IOMMU rlookup table - find the IOMMU for a specific device */
        amd_iommu_rlookup_table = (void *)__get_free_pages(
                        GFP_KERNEL | __GFP_ZERO,
                        get_order(rlookup_table_size));
        if (amd_iommu_rlookup_table == NULL)
                goto out;

        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
                        GFP_KERNEL | __GFP_ZERO,
                        get_order(MAX_DOMAIN_ID/8));
        if (amd_iommu_pd_alloc_bitmap == NULL)
                goto out;

        /*
         * let all alias entries point to itself
         */
        for (i = 0; i <= amd_iommu_last_bdf; ++i)
                amd_iommu_alias_table[i] = i;

        /*
         * never allocate domain 0 because its used as the non-allocated and
         * error value placeholder
         */
        amd_iommu_pd_alloc_bitmap[0] = 1;

        spin_lock_init(&amd_iommu_pd_lock);

        /*
         * now the data structures are allocated and basically initialized
         * start the real acpi table scan
         */
        ret = init_iommu_all(ivrs_base);
        if (ret)
                goto out;

        if (amd_iommu_irq_remap)
                amd_iommu_irq_remap = check_ioapic_information();

        if (amd_iommu_irq_remap) {
                /*
                 * Interrupt remapping enabled, create kmem_cache for the
                 * remapping tables.
                 */
                amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
                                MAX_IRQS_PER_TABLE * sizeof(u32),
                                IRQ_TABLE_ALIGNMENT,
                                0, NULL);
                if (!amd_iommu_irq_cache)
                        goto out;

                irq_lookup_table = (void *)__get_free_pages(
                                GFP_KERNEL | __GFP_ZERO,
                                get_order(rlookup_table_size));
                if (!irq_lookup_table)
                        goto out;
        }

        ret = init_memory_definitions(ivrs_base);
        if (ret)
                goto out;

        /* init the device table */
        init_device_table();

out:
        /* Don't leak any ACPI memory */
        early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
        ivrs_base = NULL;

        return ret;
}

static int amd_iommu_enable_interrupts(void)
{
        struct amd_iommu *iommu;
        int ret = 0;

        for_each_iommu(iommu) {
                ret = iommu_init_msi(iommu);
                if (ret)
                        goto out;
        }

out:
        return ret;
}

static bool detect_ivrs(void)
{
        struct acpi_table_header *ivrs_base;
        acpi_size ivrs_size;
        acpi_status status;

        status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
        if (status == AE_NOT_FOUND)
                return false;
        else if (ACPI_FAILURE(status)) {
                const char *err = acpi_format_exception(status);
                pr_err("AMD-Vi: IVRS table error: %s\n", err);
                return false;
        }

        early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);

        /* Make sure ACS will be enabled during PCI probe */
        pci_request_acs();

        if (!disable_irq_remap)
                amd_iommu_irq_remap = true;

        return true;
}

static int amd_iommu_init_dma(void)
{
        struct amd_iommu *iommu;
        int ret;

        init_device_table_dma();

        for_each_iommu(iommu)
                iommu_flush_all_caches(iommu);

        if (iommu_pass_through)
                ret = amd_iommu_init_passthrough();
        else
                ret = amd_iommu_init_dma_ops();

        if (ret)
                return ret;

        amd_iommu_init_api();

        amd_iommu_init_notifier();

        return 0;
}
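
/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/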
static int __init state_next(void)
{
        int ret = 0;

        switch (init_state) {
        case IOMMU_START_STATE:
                if (!detect_ivrs()) {
                        init_state = IOMMU_NOT_FOUND;
                        ret = -ENODEV;
                } else {
                        init_state = IOMMU_IVRS_DETECTED;
                }
                break;
        case IOMMU_IVRS_DETECTED:
                ret = early_amd_iommu_init();
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
                break;
        case IOMMU_ACPI_FINISHED:
                early_enable_iommus();
                register_syscore_ops(&amd_iommu_syscore_ops);
                x86_platform.iommu_shutdown = disable_iommus;
                init_state = IOMMU_ENABLED;
                break;
        case IOMMU_ENABLED:
                ret = amd_iommu_init_pci();
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
                enable_iommus_v2();
                break;
        case IOMMU_PCI_INIT:
                ret = amd_iommu_enable_interrupts();
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
                break;
        case IOMMU_INTERRUPTS_EN:
                ret = amd_iommu_init_dma();
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
                break;
        case IOMMU_DMA_OPS:
                init_state = IOMMU_INITIALIZED;
                break;
        case IOMMU_INITIALIZED:
                /* Nothing to do */
                break;
        case IOMMU_NOT_FOUND:
        case IOMMU_INIT_ERROR:
                /* Error states => do nothing */
                ret = -EINVAL;
                break;
        default:
                /* Unknown state */
                BUG();
        }

        return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
        int ret = 0;

        while (init_state != state) {
                ret = state_next();
                if (init_state == IOMMU_NOT_FOUND ||
                    init_state == IOMMU_INIT_ERROR)
                        break;
        }

        return ret;
}

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
        return iommu_go_to_state(IOMMU_ACPI_FINISHED);
}

int __init amd_iommu_supported(void)
{
        return amd_iommu_irq_remap ? 1 : 0;
}

int __init amd_iommu_enable(void)
{
        int ret;

        ret = iommu_go_to_state(IOMMU_ENABLED);
        if (ret)
                return ret;

        irq_remapping_enabled = 1;

        return 0;
}

void amd_iommu_disable(void)
{
        amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
        amd_iommu_resume();

        return 0;
}

int __init amd_iommu_enable_faulting(void)
{
        /* We enable MSI later when PCI is initialized */
        return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
        int ret;

        ret = iommu_go_to_state(IOMMU_INITIALIZED);
        if (ret) {
                free_dma_resources();
                if (!irq_remapping_enabled) {
                        disable_iommus();
                        free_on_init_error();
                } else {
                        struct amd_iommu *iommu;

                        uninit_device_table_dma();
                        for_each_iommu(iommu)
                                iommu_flush_all_caches(iommu);
                }
        }

        return ret;
}
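
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/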
int __init amd_iommu_detect(void)
{
        int ret;

        if (no_iommu || (iommu_detected && !gart_iommu_aperture))
                return -ENODEV;

        if (amd_iommu_disabled)
                return -ENODEV;

        ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
        if (ret)
                return ret;

        amd_iommu_detected = true;
        iommu_detected = 1;
        x86_init.iommu.iommu_init = amd_iommu_init;

        return 0;
}
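
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/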
static int __init parse_amd_iommu_dump(char *str)
{
        amd_iommu_dump = true;

        return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "fullflush", 9) == 0)
                        amd_iommu_unmap_flush = true;
                if (strncmp(str, "off", 3) == 0)
                        amd_iommu_disabled = true;
                if (strncmp(str, "force_isolation", 15) == 0)
                        amd_iommu_force_isolation = true;
        }

        return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);

IOMMU_INIT_FINISH(amd_iommu_detect,
                  gart_iommu_hole_init,
                  NULL,
                  NULL);

bool amd_iommu_v2_supported(void)
{
        return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);