#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE 0x10
#define ACPI_IVMD_TYPE_ALL 0x20
#define ACPI_IVMD_TYPE 0x21
#define ACPI_IVMD_TYPE_RANGE 0x22

#define IVHD_DEV_ALL 0x01
#define IVHD_DEV_SELECT 0x02
#define IVHD_DEV_SELECT_RANGE_START 0x03
#define IVHD_DEV_RANGE_END 0x04
#define IVHD_DEV_ALIAS 0x42
#define IVHD_DEV_ALIAS_RANGE 0x43
#define IVHD_DEV_EXT_SELECT 0x46
#define IVHD_DEV_EXT_SELECT_RANGE 0x47
#define IVHD_DEV_SPECIAL 0x48

#define IVHD_SPECIAL_IOAPIC 1
#define IVHD_SPECIAL_HPET 2

#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
#define IVHD_FLAG_PASSPW_EN_MASK 0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
#define IVHD_FLAG_ISOC_EN_MASK 0x08

#define IVMD_FLAG_EXCL_RANGE 0x08
#define IVMD_FLAG_UNITY_MAP 0x01

#define ACPI_DEVFLAG_INITPASS 0x01
#define ACPI_DEVFLAG_EXTINT 0x02
#define ACPI_DEVFLAG_NMI 0x04
#define ACPI_DEVFLAG_SYSMGT1 0x10
#define ACPI_DEVFLAG_SYSMGT2 0x20
#define ACPI_DEVFLAG_LINT0 0x40
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed
 * by one or more ivhd_entrys.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have to handle */

LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings we find in ACPI */

u32 amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs
 * it is indexed by the PCI device id or the HT unit id and contains the
 * data for a device table entry
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE 4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
		get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
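
/* Access to l1 and l2 indexed register spaces */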

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}
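
/* Programs the physical address of the device table into the IOMMU hardware */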
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap_nocache(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	return 0x04 << (*ivhd >> 6);
}
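
/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */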
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	update_last_devid(PCI_DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}
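
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */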
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS_NUM(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}
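
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table, so we check the checksum here.
 */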
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it on
	 * the other parsing functions
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this pass we fetch all the information we have gathered
 * in the first pass into the data structures which keep the information for
 * one IOMMU and allocate the command and event buffer.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(CMD_BUFFER_SIZE));

	if (cmd_buf == NULL)
		return NULL;

	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

	return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf,
		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(EVT_BUFFER_SIZE));

	if (iommu->evt_buf == NULL)
		return NULL;

	iommu->evt_buf_size = EVT_BUFFER_SIZE;

	return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its PPR requests to */
static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(PPR_LOG_SIZE));

	if (iommu->ppr_log == NULL)
		return NULL;

	return iommu->ppr_log;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * tables and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = id;
	entry->devid = *devid;
	entry->cmd_line = cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}
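
/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */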
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We only can configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here
		 */
		set_dev_entry_bit(devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}
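
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */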
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS_NUM(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u32 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}
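
/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */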
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	/* Check if IVHD EFR contains proper max banks/counters */
	if ((h->efr != 0) &&
	    ((h->efr & (0xF << 13)) != 0) &&
	    ((h->efr & (0x3F << 17)) != 0)) {
		iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
	} else {
		iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	iommu->evt_buf = alloc_event_buffer(iommu);
	if (!iommu->evt_buf)
		return -ENOMEM;

	iommu->int_enabled = false;

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	init_iommu_devices(iommu);

	return 0;
}
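
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */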
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (*p) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	return 0;
}

/* Initialize IOMMU performance counters (if supported by the hardware) */
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	u64 val = 0xabcd, val2 = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* Check if the performance counters can be written to */
	if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
	    (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
	    (val != val2)) {
		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
		amd_iommu_pc_present = false;
		return;
	}

	pr_info("AMD-Vi: IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};

static int iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;

	iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
					  iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	iommu->first_device = PCI_DEVID(MMIO_GET_BUS(range),
					MMIO_GET_FD(range));
	iommu->last_device = PCI_DEVID(MMIO_GET_BUS(range),
				       MMIO_GET_LD(range));

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2 = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR)) {
		iommu->ppr_log = alloc_ppr_log(iommu);
		if (!iommu->ppr_log)
			return -ENOMEM;
	}

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
							PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reapplied on resume.
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);

	iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
					       amd_iommu_groups, "ivhd%d",
					       iommu->index);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		int i;

		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
			dev_name(&iommu->dev->dev), iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("AMD-Vi: Extended features: ");
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}
			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled)
		pr_info("AMD-Vi: Interrupt remapping enabled\n");
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;
	}

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	ret = amd_iommu_init_api();

	if (!ret)
		print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}
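
/* called for unity map ACPI definition */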
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}
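
/* iterates over all memory definitions we find in the ACPI table */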
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_init_flags(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_enable(iommu);
		iommu_flush_all_caches(iommu);
	}
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_on_init_error(void)
{
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));

	if (amd_iommu_irq_cache) {
		kmem_cache_destroy(amd_iommu_irq_cache);
		amd_iommu_irq_cache = NULL;
	}

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();

#endif
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
			       fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("AMD-Vi: Disabling interrupt remapping\n");

	return ret;
}

static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_unity_maps();
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures is
 *		determined that needs to be allocated.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;
	int i, ret = 0;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU see for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * let all alias entries point to itself
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because its used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
				MAX_IRQS_PER_TABLE * sizeof(u32),
				IRQ_TABLE_ALIGNMENT,
				0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
	ivrs_base = NULL;

	return ret;
}

static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return false;
	}

	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/

static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state = IOMMU_NOT_FOUND;
			ret = -ENODEV;
		} else {
			init_state = IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		register_syscore_ops(&amd_iommu_syscore_ops);
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = 0;

	while (init_state != state) {
		ret = state_next();
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR)
			break;
	}

	return ret;
}

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;

	return 0;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_on_init_error();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}

	return ret;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (amd_iommu_disabled)
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 0;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}

static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_ioapic_map_size++;
	early_ioapic_map[i].id = id;
	early_ioapic_map[i].devid = devid;
	early_ioapic_map[i].cmd_line = true;

	return 1;
}

static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_hpet_map_size++;
	early_hpet_map[i].id = id;
	early_hpet_map[i].devid = devid;
	early_hpet_map[i].cmd_line = true;

	return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);

/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/

u8 amd_iommu_pc_get_max_banks(u16 devid)
{
	struct amd_iommu *iommu;
	u8 ret = 0;

	/* locate the iommu governing the devid */
	iommu = amd_iommu_rlookup_table[devid];
	if (iommu)
		ret = iommu->max_banks;

	return ret;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);

u8 amd_iommu_pc_get_max_counters(u16 devid)
{
	struct amd_iommu *iommu;
	u8 ret = 0;

	/* locate the iommu governing the devid */
	iommu = amd_iommu_rlookup_table[devid];
	if (iommu)
		ret = iommu->max_counters;

	return ret;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);

int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
				 u64 *value, bool is_write)
{
	struct amd_iommu *iommu;
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Locate the iommu associated with the device ID */
	iommu = amd_iommu_rlookup_table[devid];

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		writel((u32)*value, iommu->mmio_base + offset);
		writel((*value >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		/* OR in the low word; the original assignment discarded the
		 * high half that was just read */
		*value |= readl(iommu->mmio_base + offset);
	}

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);