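/*
 * AMD IOMMU (AMD-Vi) hardware detection and initialization.
 *
 * This file parses the ACPI IVRS table, allocates the shared device,
 * alias and rlookup tables, and brings up every IOMMU found in the
 * system, including interrupt and suspend/resume handling.
 */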
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
#define ACPI_IVMD_TYPE_ALL 0x20
#define ACPI_IVMD_TYPE 0x21
#define ACPI_IVMD_TYPE_RANGE 0x22

#define IVHD_DEV_ALL 0x01
#define IVHD_DEV_SELECT 0x02
#define IVHD_DEV_SELECT_RANGE_START 0x03
#define IVHD_DEV_RANGE_END 0x04
#define IVHD_DEV_ALIAS 0x42
#define IVHD_DEV_ALIAS_RANGE 0x43
#define IVHD_DEV_EXT_SELECT 0x46
#define IVHD_DEV_EXT_SELECT_RANGE 0x47
#define IVHD_DEV_SPECIAL 0x48
#define IVHD_DEV_ACPI_HID 0xf0

#define UID_NOT_PRESENT 0
#define UID_IS_INTEGER 1
#define UID_IS_CHARACTER 2

#define IVHD_SPECIAL_IOAPIC 1
#define IVHD_SPECIAL_HPET 2

#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
#define IVHD_FLAG_PASSPW_EN_MASK 0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
#define IVHD_FLAG_ISOC_EN_MASK 0x08

#define IVMD_FLAG_EXCL_RANGE 0x08
#define IVMD_FLAG_UNITY_MAP 0x01

#define ACPI_DEVFLAG_INITPASS 0x01
#define ACPI_DEVFLAG_EXTINT 0x02
#define ACPI_DEVFLAG_NMI 0x04
#define ACPI_DEVFLAG_SYSMGT1 0x10
#define ACPI_DEVFLAG_SYSMGT2 0x20
#define ACPI_DEVFLAG_LINT0 0x40
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000

#define LOOP_TIMEOUT 100000

extern const struct iommu_ops amd_iommu_ops;
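/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important
 * values out of it.
 *
 * ivhd_header describes one IOMMU in the ACPI table. It is typically
 * followed by one or more ivhd_entrys.
 */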
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg;
	u64 res;
} __attribute__((packed));
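/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use. The hidh/cid/uid* fields are only used by
 * the variable-length ACPI HID entries (type 0xf0).
 */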
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));
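/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */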
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have to handle */

LIST_HEAD(amd_iommu_unity_map);		/* required unity mappings found in ACPI */

bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs. It is
 * indexed by the PCI device id or the HT unit id.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * Pointer to a device table to which the content of the old device table
 * will be copied. It is only used in a kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver-specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given
 * device id quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * used to track allocated domain ids.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE 4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1 << CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
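/*
 * Worked example for tbl_size(): with amd_iommu_last_bdf == 0xffff there
 * are 65536 device ids to cover. For the device table (32 bytes per entry)
 * that is 2MB; for the alias table (2 bytes per entry) it is 128KB. Both
 * are rounded up to a power-of-two number of pages by get_order().
 */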
int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
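/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program registers in the IOMMU hardware,
 * e.g. the device table base, the exclusion range and individual control
 * bits.
 *
 ****************************************************************************/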
/*
 * This function set the exclusion range in the IOMMU hardware if this
 * IOMMU has one.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
		       address, end);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap_nocache(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}
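/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle.
 *
 ****************************************************************************/

/*
 * An IVHD entry with a type below 0x80 encodes its own length in the two
 * most significant bits of the type byte (0x04 << (type >> 6) bytes). The
 * variable-length ACPI HID entry (type 0xf0) stores the UID length in
 * byte 21, following 22 fixed bytes.
 */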
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, (UID length + 22) is the entry length */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}
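/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI
 * table the second time. In this parsing iteration we allocate the IOMMU
 * specific data structures: the command buffer (a ring buffer used to send
 * commands such as invalidations to the hardware), the event log, the PPR
 * log and the GA log.
 *
 ****************************************************************************/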
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	if (iommu->ga_log)
		free_pages((unsigned long)iommu->ga_log,
			   get_order(GA_LOG_SIZE));
	if (iommu->ga_log_tail)
		free_pages((unsigned long)iommu->ga_log_tail,
			   get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif
	return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	/*
	 * The GA log tail register must point at the shadow tail pointer
	 * page, not at the log buffer itself, so program ga_log_tail here
	 * (the original code wrote ga_log, which upstream later fixed).
	 */
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif

static int iommu_init_ga(struct amd_iommu *iommu)
{
	int ret = 0;

#ifdef CONFIG_IRQ_REMAP
	/*
	 * Note: We have already checked GASup from the IVRS table.
	 *       Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif

	return ret;
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}
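/*
 * A device table entry is 256 bits wide (four u64 words). The helpers below
 * address single bits in an entry: bits 0-63 live in data[0], bits 64-127
 * in data[1], and so on - hence the (bit >> 6) word index and the
 * (bit & 0x3f) bit index.
 */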
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
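/*
 * In a kdump kernel the IOMMU may still be enabled by the previous
 * (crashed) kernel, with devices actively using their old DMA and
 * interrupt remapping setup. Copy the old device table so translation
 * stays consistent until the new kernel takes over device by device.
 */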
static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
			       iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
			       iommu->index);
			return false;
		}
	}

	old_devtb_phys = entry & PAGE_MASK;
	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
						   get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If a gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
				pr_err("Wrong old irq remapping flag for devid %#x\n",
				       devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = id;
	entry->devid = *devid;
	entry->cmd_line = cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line = cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("AMD-Vi:%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);
	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We only can configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here.
		 */
		set_dev_entry_bit(devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}
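/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */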
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;

	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk(" DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk(" DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid = (e->ext >> 8) & 0xffff;
			type = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN] = {0};
			u8 uid[ACPIHID_UID_LEN] = {0};
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
				uid[ACPIHID_UID_LEN - 1] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			flags = e->flags;

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_acpi_hid_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_acpi_hid_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}
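/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */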
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}
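/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */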
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));
}
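/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */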
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}
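/*
 * The IVRS table may contain multiple IVHD blocks (types 0x10, 0x11, 0x40)
 * describing the same IOMMU. Pick the highest type we support so we parse
 * the richest description available.
 */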
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
					(base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}
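/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */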
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk(" mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write);

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	u64 val = 0xabcd, val2 = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* Check if the performance counters can be written to */
	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
	    (val != val2)) {
		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
		amd_iommu_pc_present = false;
		return;
	}

	pr_info("AMD-Vi: IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};
static int iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;
	int ret;

	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2 = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	ret = iommu_init_ga(iommu);
	if (ret)
		return ret;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev =
			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reapplied on resume.
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
	iommu_device_register(&iommu->iommu);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		int i;

		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
			dev_name(&iommu->dev->dev), iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("AMD-Vi: Extended features (%#llx):\n",
				iommu->features);
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}

			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled) {
		pr_info("AMD-Vi: Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("AMD-Vi: virtual APIC enabled\n");
	}
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;
	}
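	/*
	 * Register the IOMMU API first, then populate the default DMA
	 * device table entries and flush all IOMMU caches so the hardware
	 * sees a consistent view before success is reported.
	 */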
	ret = amd_iommu_init_api();

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	if (!ret)
		print_iommu_info();

	return ret;
}
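/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit of a hack because there could be theoretically
 * more than one device per IOMMU. But as we only support one fault
 * interrupt per IOMMU this is not a problem.
 *
 ****************************************************************************/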
static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	iommu_ga_log_enable(iommu);

	return 0;
}
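/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/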
static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}
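/*
 * Init the device table to not allow DMA access for devices: every entry
 * is marked valid with translation enabled, so a device cannot DMA until
 * it is attached to a domain.
 */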
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
		iommu_feature_enable(iommu, CONTROL_GAM_EN);
		/* Fall through */
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}

static void early_enable_iommu(struct amd_iommu *iommu)
{
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);
}
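/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or if in a kdump kernel with all IOMMUs pre-enabled, try to copy the
 * old content of the device table entries. If that is not the case or
 * the copy failed, just continue as a normal kernel would.
 */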
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	if (!copy_device_table()) {
		/*
		 * If we come here because copying the device table from the
		 * old kernel (with all IOMMUs enabled) failed, print an error
		 * message and try to free the allocated old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");
		if (old_dev_tbl_cpy != NULL)
			free_pages((unsigned long)old_dev_tbl_cpy,
				   get_order(dev_table_size));

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);
		}
	} else {
		pr_info("Copied DEV table from previous kernel.\n");
		free_pages((unsigned long)amd_iommu_dev_table,
			   get_order(dev_table_size));
		amd_iommu_dev_table = old_dev_tbl_cpy;
		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);
		}
	}

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}

/*
 * Suspend/Resume support
 */
static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_iommu_resources(void)
{
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

	free_iommu_all();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();

#endif
}
/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
			       fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("AMD-Vi: Disabling interrupt remapping\n");

	return ret;
}

static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	amd_iommu_pd_alloc_bitmap = NULL;

	free_unity_maps();
}
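/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for the AMD IOMMU (IVRS)
 * four times:
 *
 *	1 pass) Discover the most comprehensive IVHD type to use.
 *
 *	2 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the shared data
 *		structures is determined.
 *
 *	3 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs.
 *
 *	4 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */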
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;
	int i, remap_cache_sz, ret = 0;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
						 get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
					  GFP_KERNEL | __GFP_ZERO,
					  get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * let all alias entries point to itself
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it is used as the non-allocated and
	 * error value placeholder
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							IRQ_TABLE_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				   GFP_KERNEL | __GFP_ZERO,
				   get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);
	ivrs_base = NULL;

	return ret;
}

static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/

static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state = IOMMU_NOT_FOUND;
			ret        = -ENODEV;
		} else {
			init_state = IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD-Vi: AMD IOMMU disabled on kernel command-line\n");
			free_dma_resources();
			free_iommu_resources();
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;

	return 0;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}

	return ret;
}

static bool amd_iommu_sme_check(void)
{
	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("AMD-Vi: IOMMU not currently supported when SME is active\n");

	return false;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}
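/*
 * Example usage (matching the strncmp() checks above): booting with
 * "amd_iommu_intr=vapic" selects guest virtual APIC interrupt mode,
 * while "amd_iommu_intr=legacy" falls back to legacy interrupt remapping.
 */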

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}
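/*
 * Example usage: "amd_iommu=off" disables the IOMMU, "amd_iommu=fullflush"
 * flushes the IO/TLB on every unmap, and "amd_iommu=force_isolation"
 * forces device isolation for all devices. Because the loop above scans
 * the whole option string, something like "amd_iommu=fullflush,force_isolation"
 * sets both flags.
 */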

static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_ioapic_map_size++;
	early_ioapic_map[i].id		= id;
	early_ioapic_map[i].devid	= devid;
	early_ioapic_map[i].cmd_line	= true;

	return 1;
}
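/*
 * Example usage, following the sscanf() format above:
 * "ivrs_ioapic[4]=0:14.0" overrides IOAPIC id 4 to PCI device 00:14.0,
 * i.e. devid = (0x00 << 8) | (0x14 << 3) | 0x0 = 0x00a0.
 */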

static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_hpet_map_size++;
	early_hpet_map[i].id		= id;
	early_hpet_map[i].devid		= devid;
	early_hpet_map[i].cmd_line	= true;

	return 1;
}
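/*
 * Example usage, analogous to ivrs_ioapic above:
 * "ivrs_hpet[0]=0:14.0" maps HPET id 0 to PCI device 00:14.0
 * (devid 0x00a0).
 */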

static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("AMD-Vi: Invalid command line: hid or uid\n");
		return 1;
	}

	/*
	 * Guard against overflowing the early map, mirroring the checks in
	 * parse_ivrs_ioapic() and parse_ivrs_hpet() above.
	 */
	if (early_acpihid_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early ACPIHID map overflow - ignoring ivrs_acpihid%s\n",
		       str);
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line = true;

	return 1;
}
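/*
 * Example usage, following the "[%x:%x.%x]=%s" format above:
 * "ivrs_acpihid[0:15.1]=AMD0020:0" maps the ACPI device with HID "AMD0020"
 * and UID "0" to PCI device 00:15.1; the HID and UID are split at the ':'
 * by the strsep() call.
 */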

__setup("amd_iommu_dump",	parse_amd_iommu_dump);
__setup("amd_iommu=",		parse_amd_iommu_options);
__setup("amd_iommu_intr=",	parse_amd_iommu_intr);
__setup("ivrs_ioapic",		parse_ivrs_ioapic);
__setup("ivrs_hpet",		parse_ivrs_hpet);
__setup("ivrs_acpihid",		parse_ivrs_acpihid);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);

struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}
EXPORT_SYMBOL(get_amd_iommu);

/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/

u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);

u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}
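/*
 * Worked example for the offset computation above (illustrative): bank 0,
 * counter 0, function 0 gives offset = ((0x40 | 0) << 12) | (0 << 8) | 0
 * = 0x40000, the start of the counter bank aperture; bank 1, counter 2,
 * function 0x10 gives (0x41 << 12) | (2 << 8) | 0x10 = 0x41210.
 */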

int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
EXPORT_SYMBOL(amd_iommu_pc_get_reg);

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);
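/*
 * Minimal usage sketch (illustrative, not part of this file): a caller
 * such as a perf driver could read counter register 0 of bank 0 on the
 * first IOMMU like this:
 *
 *	u64 value;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && !amd_iommu_pc_get_reg(iommu, 0, 0, 0, &value))
 *		pr_info("IOMMU PC counter register: 0x%llx\n", value);
 */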