// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/cc_platform.h>
#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL		0x20
#define ACPI_IVMD_TYPE			0x21
#define ACPI_IVMD_TYPE_RANGE		0x22

#define IVHD_DEV_ALL			0x01
#define IVHD_DEV_SELECT			0x02
#define IVHD_DEV_SELECT_RANGE_START	0x03
#define IVHD_DEV_RANGE_END		0x04
#define IVHD_DEV_ALIAS			0x42
#define IVHD_DEV_ALIAS_RANGE		0x43
#define IVHD_DEV_EXT_SELECT		0x46
#define IVHD_DEV_EXT_SELECT_RANGE	0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT			0
#define UID_IS_INTEGER			1
#define UID_IS_CHARACTER		2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK	0x01
#define IVHD_FLAG_PASSPW_EN_MASK	0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK	0x04
#define IVHD_FLAG_ISOC_EN_MASK		0x08

#define IVMD_FLAG_EXCL_RANGE		0x08
#define IVMD_FLAG_IW			0x04
#define IVMD_FLAG_IR			0x02
#define IVMD_FLAG_UNITY_MAP		0x01

#define ACPI_DEVFLAG_INITPASS		0x01
#define ACPI_DEVFLAG_EXTINT		0x02
#define ACPI_DEVFLAG_NMI		0x04
#define ACPI_DEVFLAG_SYSMGT1		0x10
#define ACPI_DEVFLAG_SYSMGT2		0x20
#define ACPI_DEVFLAG_LINT0		0x40
#define ACPI_DEVFLAG_LINT1		0x80
#define ACPI_DEVFLAG_ATSDIS		0x10000000

#define LOOP_TIMEOUT	100000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */
extern const struct iommu_ops amd_iommu_ops;

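/*
 * structure describing one IOMMU in the ACPI table. Typically followed
 * by one or more ivhd_entrys.
 */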
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg;	/* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));

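/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */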
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	struct_group(ext_hid,
		u32 ext;
		u32 hidh;
	);
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));

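/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions of memory which must be mapped 1:1 for the
 * system.
 */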
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool amd_iommu_disabled __initdata;
static bool amd_iommu_force_enable __initdata;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs
 * it is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table which the content of old device table
 * will be copied to. It's only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

static u32 amd_iommu_ivinfo __initdata;

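/*
 * Helpers to query and update whether translation was already enabled
 * on this IOMMU when the kernel took over (e.g. when booting into a
 * kdump kernel).
 */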
bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

/*
 * Returns the size needed for a per-device table, rounded up to a
 * power-of-two number of pages. For example, with amd_iommu_last_bdf
 * of 0xffff and a 32-byte entry this yields (0xffff + 1) * 32 = 2 MiB.
 */
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

#ifdef CONFIG_IRQ_REMAP
static bool check_feature_on_all_iommus(u64 mask)
{
	bool ret = false;
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		ret = iommu_feature(iommu, mask);
		if (!ret)
			return false;
	}

	return true;
}
#endif

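/*
 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
 * Default to IVHD EFR since it is available sooner
 * (i.e. before PCI init).
 */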
static void __init early_iommu_features_init(struct amd_iommu *iommu,
					     struct ivhd_header *h)
{
	if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
		iommu->features = h->efr_reg;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!iommu_feature(iommu, FEATURE_SNP))
		return;

	/* Note:
	 * Re-purpose the exclusion base/limit registers to hold the
	 * completion-wait write-back semaphore address when SNP is on.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/* Note:
	 * Default to 4 Kbytes, which can be specified via
	 * the base/limit registers.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

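/* Programs the physical address of the device table into the IOMMU hardware */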
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}

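/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we find out the highest device id this code
 * has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry. It must be
 * called with a pointer to the start of the entry.
 */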
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, entry length is 22 + uid_len (byte 21) */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

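/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this path the memory required for the shared data
 * structures is allocated and the IOMMUs are initialized and enabled.
 *
 * After everything went well the IOMMUs are fully initialized, but they
 * can't do anything useful yet because interrupts are still disabled.
 *
 ****************************************************************************/

/*
 * This function allocates the command buffer page for a single IOMMU
 */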
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function restarts event logging in case the IOMMU experienced
 * an event log buffer overflow.
 */
void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	if (buf &&
	    iommu_feature(iommu, FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      EVT_BUFFER_SIZE);

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its PPR notifications to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      PPR_LOG_SIZE);

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;
	u64 entry;

	if (!iommu->ga_log)
		return -EINVAL;

	/* Check if already running */
	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
		return 0;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
		udelay(10);
	}

	if (WARN_ON(i >= LOOP_TIMEOUT))
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}

static int iommu_init_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
#else
	return 0;
#endif /* CONFIG_IRQ_REMAP */
}

static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);

	return iommu->cmd_sem ? 0 : -ENOMEM;
}

static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
	if (iommu->cmd_sem)
		free_page((unsigned long)iommu->cmd_sem);
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}

static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
				iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
				iommu->index);
			return false;
		}
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask(sme_me_mask), we must remove the memory
	 * encryption mask to obtain the true physical address in kdump kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
				get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_INTTABLEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line	= cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);
	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	amd_iommu_apply_ivrs_quirks();

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;

	end += h->length;


	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN];
			u8 uid[ACPIHID_UID_LEN];
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
			memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			uid[0] = '\0';
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, &e->uid, e->uidl);
				uid[e->uidl] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			flags = e->flags;

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_acpi_hid_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_acpi_hid_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_cwwb_sem(iommu);
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);
	iommu->cmd_sem_val = 0;

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * GAM also requires GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling it.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * XT and GAM also require GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling them.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}

		if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

		early_iommu_features_init(iommu, h);

		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_cwwb_sem(iommu))
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	if (amd_iommu_irq_remap) {
		ret = amd_iommu_create_irq_domain(iommu);
		if (ret)
			return ret;
	}

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through the IVHD headers in the IVRS table and
 * returns the highest supported IVHD type found for the first device id.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
					(base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	u64 val;
	struct pci_dev *pdev = iommu->dev;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	pci_info(pdev, "IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);

	return;
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};

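/*
 * Note: IVHD types 0x11 and 0x40 also contain an exact copy of
 * the IOMMU Extended Feature Register [MMIO Offset 0030h].
 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
 */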
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{
	u64 features;

	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
		return;

	/* read extended feature bits */
	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);

	if (!iommu->features) {
		iommu->features = features;
		return;
	}

	/*
	 * Sanity check and warn if EFR values from
	 * IVHD and MMIO conflict.
	 */
	if (features != iommu->features)
		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
			features, iommu->features);
}

static int __init iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	int ret;

	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	late_iommu_features_init(iommu);

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	ret = iommu_init_ga_log(iommu);
	if (ret)
		return ret;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
		pr_info("Using strict mode due to virtualization\n");
		iommu_set_dma_strict();
		amd_iommu_np_cache = true;
	}

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev =
			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reapplied on resume.
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				&iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				&iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		struct pci_dev *pdev = iommu->dev;
		int i;

		pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("Extended features (%#llx):", iommu->features);

			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}

			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled) {
		pr_info("Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("Virtual APIC enabled\n");
		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
			pr_info("X2APIC enabled\n");
	}
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;

		/* Need to setup range after PCI init */
		iommu_set_cwwb_range(iommu);
	}

	/*
	 * Order is important here to make sure any unity map requirements are
	 * fulfilled. The unity mappings are created and written to the device
	 * table during the amd_iommu_init_api() call.
	 *
	 * After that we call init_device_table_dma() to make sure any
	 * uninitialized DTE will block DMA, and in the end we flush the caches
	 * of all IOMMUs to make sure the changes to the device table are
	 * active.
	 */
	ret = amd_iommu_init_api();

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	if (!ret)
		print_iommu_info();

	return ret;
}

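/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/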
static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	return 0;
}

union intcapxt {
	u64	capxt;
	struct {
		u64	reserved_0		:  2,
			dest_mode_logical	:  1,
			reserved_1		:  5,
			destid_0_23		: 24,
			vector			:  8,
			reserved_2		: 16,
			destid_24_31		:  8;
	};
} __attribute__ ((packed));


static struct irq_chip intcapxt_controller;

static int intcapxt_irqdomain_activate(struct irq_domain *domain,
				       struct irq_data *irqd, bool reserve)
{
	return 0;
}

static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
					  struct irq_data *irqd)
{
}


static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	int i, ret;

	if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	for (i = virq; i < virq + nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, i);

		irqd->chip = &intcapxt_controller;
		irqd->chip_data = info->data;
		__irq_set_handler(i, handle_edge_irq, 0, "edge");
	}

	return ret;
}

static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs)
{
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}


static void intcapxt_unmask_irq(struct irq_data *irqd)
{
	struct amd_iommu *iommu = irqd->chip_data;
	struct irq_cfg *cfg = irqd_cfg(irqd);
	union intcapxt xt;

	xt.capxt = 0ULL;
	xt.dest_mode_logical = apic->dest_mode_logical;
	xt.vector = cfg->vector;
	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
	xt.destid_24_31 = cfg->dest_apicid >> 24;

	/**
	 * Current IOMMU implementation uses the same IRQ for all
	 * 3 IOMMU interrupts.
	 */
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
}

static void intcapxt_mask_irq(struct irq_data *irqd)
{
	struct amd_iommu *iommu = irqd->chip_data;

	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
}


static int intcapxt_set_affinity(struct irq_data *irqd,
				 const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irqd->parent_data;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;
	return 0;
}

static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
{
	return on ? -EOPNOTSUPP : 0;
}

static struct irq_chip intcapxt_controller = {
	.name			= "IOMMU-MSI",
	.irq_unmask		= intcapxt_unmask_irq,
	.irq_mask		= intcapxt_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_affinity	= intcapxt_set_affinity,
	.irq_set_wake		= intcapxt_set_wake,
	.flags			= IRQCHIP_MASK_ON_SUSPEND,
};

static const struct irq_domain_ops intcapxt_domain_ops = {
	.alloc			= intcapxt_irqdomain_alloc,
	.free			= intcapxt_irqdomain_free,
	.activate		= intcapxt_irqdomain_activate,
	.deactivate		= intcapxt_irqdomain_deactivate,
};


static struct irq_domain *iommu_irqdomain;

static struct irq_domain *iommu_get_irqdomain(void)
{
	struct fwnode_handle *fn;

	/* No need for locking here (yet) as the init is single-threaded */
	if (iommu_irqdomain)
		return iommu_irqdomain;

	fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
	if (!fn)
		return NULL;

	iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
						      fn, &intcapxt_domain_ops,
						      NULL);
	if (!iommu_irqdomain)
		irq_domain_free_fwnode(fn);

	return iommu_irqdomain;
}

static int iommu_setup_intcapxt(struct amd_iommu *iommu)
{
	struct irq_domain *domain;
	struct irq_alloc_info info;
	int irq, ret;

	domain = iommu_get_irqdomain();
	if (!domain)
		return -ENXIO;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
	info.data = iommu;

	irq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
	if (irq < 0) {
		irq_domain_remove(domain);
		return irq;
	}

	ret = request_threaded_irq(irq, amd_iommu_int_handler,
				   amd_iommu_int_thread, 0, "AMD-Vi", iommu);
	if (ret) {
		irq_domain_free_irqs(irq, 1);
		irq_domain_remove(domain);
		return ret;
	}

	return 0;
}

static int iommu_init_irq(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		ret = iommu_setup_intcapxt(iommu);
	else if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

	iommu->int_enabled = true;
enable_faults:

	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);

	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);

	iommu_ga_log_enable(iommu);

	return 0;
}

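/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/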
static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	/*
	 * Treat per-device exclusion ranges as r/w unity-mapped regions
	 * because devices may need them - otherwise they cannot access
	 * the region.
	 */
	if (m->flags & IVMD_FLAG_EXCL_RANGE)
		e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
		iommu_feature_enable(iommu, CONTROL_GAM_EN);
		fallthrough;
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}

static void early_enable_iommu(struct amd_iommu *iommu)
{
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable_xt(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);
}


/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to
 * copy the old content of the device table entries. If that is not the
 * case, or the copy fails, just continue as a normal kernel would.
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	if (!copy_device_table()) {
		/*
		 * If we get here because copying the device table from
		 * the old kernel failed while all IOMMUs were pre-enabled,
		 * print an error and free the allocated old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");
		if (old_dev_tbl_cpy != NULL)
			free_pages((unsigned long)old_dev_tbl_cpy,
				   get_order(dev_table_size));

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);
		}
	} else {
		pr_info("Copied DEV table from previous kernel.\n");
		free_pages((unsigned long)amd_iommu_dev_table,
			   get_order(dev_table_size));
		amd_iommu_dev_table = old_dev_tbl_cpy;
		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_enable_xt(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);
		}
	}

#ifdef CONFIG_IRQ_REMAP
	/*
	 * Note: We have already checked GASup from the IVRS table.
	 *       Now we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}

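/* Enable the IOMMUv2 features: PPR logging and guest translation */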
static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}

/*
 * Suspend/Resume support: the IOMMUs are disabled across suspend and
 * completely re-initialized and re-enabled on resume.
 */
static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to prevent memory corruption */
	disable_iommus();

	return 0;
}

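/*
 * The syscore ops are registered once the state machine reaches
 * IOMMU_ENABLED (see state_next()), so the suspend/resume hooks only
 * ever run for fully initialized IOMMUs.
 */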
static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_iommu_resources(void)
{
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

	free_iommu_all();
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
			       fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IO-APIC in the system.
		 */
		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("Disabling interrupt remapping\n");

	return ret;
}

static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	amd_iommu_pd_alloc_bitmap = NULL;

	free_unity_maps();
}

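/* Cache the IVinfo field of the IVRS table header for later feature checks */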
static void __init ivinfo_init(void *ivrs)
{
	amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * It is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for the AMD IOMMU
 * (IVRS) three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the shared data
 *		structures is determined.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about the available AMD
 *		IOMMUs in the system. It also maps the PCI devices in
 *		the system to specific IOMMUs.
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized, update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	int i, remap_cache_sz, ret;
	acpi_status status;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate the checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

	ivinfo_init(ivrs_base);

	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated.
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

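	/*
	 * Allocate the shared data structures sized above: the device
	 * table, the alias table, the rlookup table and the
	 * protection-domain allocation bitmap.
	 */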
	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * Let all alias entries point to themselves initially.
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * Never allocate domain 0 because it is used as the
	 * non-allocated and error value placeholder.
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);

	/*
	 * Now the data structures are allocated and basically
	 * initialized; start the real ACPI table scan.
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		/* GA mode uses 128-bit IRTEs, legacy mode 32-bit IRTEs */
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							DTE_INTTAB_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);

	return ret;
}

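/* Set up the interrupt (MSI) handling for each IOMMU in the system */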
static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_irq(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

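/*
 * Look for an IVRS ACPI table. Even when one is present, the IOMMU is
 * left disabled on Stoney Ridge systems (graphics device 0x1002:0x98e4)
 * unless it is force-enabled on the command line.
 */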
static bool __init detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;
	int i;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	if (amd_iommu_force_enable)
		goto out;

	/* Don't use IOMMU if there is Stoney Ridge graphics */
	for (i = 0; i < 32; i++) {
		u32 pci_id;

		pci_id = read_pci_config(0, i, 0, 0);
		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
			pr_info("Disable IOMMU on Stoney Ridge\n");
			return false;
		}
	}

out:
	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state = IOMMU_NOT_FOUND;
			ret = -ENODEV;
		} else {
			init_state = IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		if (amd_iommu_disabled) {
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		} else {
			ret = early_amd_iommu_init();
			init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}
	return ret;
}

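/*
 * Advance the initialization state machine until the requested state
 * is reached or until a terminal error state aborts the process.
 */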
static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret) {
		amd_iommu_irq_remap = false;
		return ret;
	}

	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;
	return amd_iommu_xt_mode;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	struct amd_iommu *iommu;
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
		gart_iommu_init();
	}
#endif

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);

	return ret;
}

static bool amd_iommu_sme_check(void)
{
	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
	    (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("IOMMU not currently supported when SME is active\n");

	return false;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/
static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0) {
			pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
			iommu_set_dma_strict();
		}
		if (strncmp(str, "force_enable", 12) == 0)
			amd_iommu_force_enable = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}

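/*
 * ivrs_ioapic[<id>]=<bus>:<dev>.<fn> overrides the IOAPIC-id to
 * device-id mapping from the IVRS table; the devid below is encoded
 * as ((bus << 8) | devfn).
 */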
static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_ioapic_map_size++;
	early_ioapic_map[i].id = id;
	early_ioapic_map[i].devid = devid;
	early_ioapic_map[i].cmd_line = true;

	return 1;
}

static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_hpet_map_size++;
	early_hpet_map[i].id = id;
	early_hpet_map[i].devid = devid;
	early_hpet_map[i].cmd_line = true;

	return 1;
}

static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("Invalid command line: hid or uid\n");
		return 1;
	}

	/* Guard against overflowing the early map, like the other parsers */
	if (early_acpihid_map_size == EARLY_MAP_SIZE) {
		pr_err("Early ACPIHID map overflow - ignoring ivrs_acpihid%s\n",
		       str);
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line = true;

	return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("amd_iommu_intr=", parse_amd_iommu_intr);
__setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);
__setup("ivrs_acpihid", parse_ivrs_acpihid);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);

struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}

/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/
u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);

u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);

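/*
 * Read or write a 48-bit performance counter register. The MMIO offset
 * encodes the bank (4K stride, starting at 0x40000), the counter
 * (256-byte stride) and the function/register selector in the low bits.
 */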
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	/* Counters are 48 bits wide, accessed as two 32-bit MMIO words */
	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}

int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}