#define pr_fmt(fmt) "AMD-Vi: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"
39
40
41
42
43#define IVRS_HEADER_LENGTH 48
44
45#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
46#define ACPI_IVMD_TYPE_ALL 0x20
47#define ACPI_IVMD_TYPE 0x21
48#define ACPI_IVMD_TYPE_RANGE 0x22
49
50#define IVHD_DEV_ALL 0x01
51#define IVHD_DEV_SELECT 0x02
52#define IVHD_DEV_SELECT_RANGE_START 0x03
53#define IVHD_DEV_RANGE_END 0x04
54#define IVHD_DEV_ALIAS 0x42
55#define IVHD_DEV_ALIAS_RANGE 0x43
56#define IVHD_DEV_EXT_SELECT 0x46
57#define IVHD_DEV_EXT_SELECT_RANGE 0x47
58#define IVHD_DEV_SPECIAL 0x48
59#define IVHD_DEV_ACPI_HID 0xf0
60
61#define UID_NOT_PRESENT 0
62#define UID_IS_INTEGER 1
63#define UID_IS_CHARACTER 2
64
65#define IVHD_SPECIAL_IOAPIC 1
66#define IVHD_SPECIAL_HPET 2
67
68#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
69#define IVHD_FLAG_PASSPW_EN_MASK 0x02
70#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
71#define IVHD_FLAG_ISOC_EN_MASK 0x08
72
73#define IVMD_FLAG_EXCL_RANGE 0x08
74#define IVMD_FLAG_IW 0x04
75#define IVMD_FLAG_IR 0x02
76#define IVMD_FLAG_UNITY_MAP 0x01
77
78#define ACPI_DEVFLAG_INITPASS 0x01
79#define ACPI_DEVFLAG_EXTINT 0x02
80#define ACPI_DEVFLAG_NMI 0x04
81#define ACPI_DEVFLAG_SYSMGT1 0x10
82#define ACPI_DEVFLAG_SYSMGT2 0x20
83#define ACPI_DEVFLAG_LINT0 0x40
84#define ACPI_DEVFLAG_LINT1 0x80
85#define ACPI_DEVFLAG_ATSDIS 0x10000000
86
87#define LOOP_TIMEOUT 100000
88
89
90
91
92
93
94
95extern const struct iommu_ops amd_iommu_ops;
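
/*
 * ACPI table definitions
 *
 * struct ivhd_header mirrors the layout of an IVHD (I/O Virtualization
 * Hardware Definition) block in the IVRS ACPI table.  It describes one
 * IOMMU in the system and is followed by a list of ivhd_entry device
 * entries.  The efr_reg and res fields only exist for IVHD types 0x11
 * and 0x40; type 0x10 headers end after efr_attr (see
 * get_ivhd_header_size() below: 24 vs. 40 bytes).
 */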
101struct ivhd_header {
102 u8 type;
103 u8 flags;
104 u16 length;
105 u16 devid;
106 u16 cap_ptr;
107 u64 mmio_phys;
108 u16 pci_seg;
109 u16 info;
110 u32 efr_attr;
111
112
113 u64 efr_reg;
114 u64 res;
115} __attribute__((packed));
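
/*
 * A device entry inside an IVHD block.  It describes which devices the
 * IOMMU translates, the requestor IDs (devids) they use and, for
 * IVHD_DEV_ACPI_HID entries, the HID/UID identifiers of non-PCI devices.
 */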
121struct ivhd_entry {
122 u8 type;
123 u16 devid;
124 u8 flags;
125 u32 ext;
126 u32 hidh;
127 u64 cid;
128 u8 uidf;
129 u8 uidl;
130 u8 uid;
131} __attribute__((packed));
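
/*
 * An IVMD (I/O Virtualization Memory Definition) entry.  It describes a
 * memory range with special requirements, e.g. one that has to be unity
 * mapped or excluded from translation, for a single device, a device
 * range or all devices.
 */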
137struct ivmd_header {
138 u8 type;
139 u8 flags;
140 u16 length;
141 u16 devid;
142 u16 aux;
143 u64 resv;
144 u64 range_start;
145 u64 range_length;
146} __attribute__((packed));
147
148bool amd_iommu_dump;
149bool amd_iommu_irq_remap __read_mostly;
150
151enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
152
153int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
154static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
155
156static bool amd_iommu_detected;
157static bool __initdata amd_iommu_disabled;
158static int amd_iommu_target_ivhd_type;
159
160u16 amd_iommu_last_bdf;
161
162LIST_HEAD(amd_iommu_unity_map);
163
164bool amd_iommu_unmap_flush;
165
166LIST_HEAD(amd_iommu_list);
167
168
169
170struct amd_iommu *amd_iommus[MAX_IOMMUS];
171
172
173static int amd_iommus_present;
174
175
176bool amd_iommu_np_cache __read_mostly;
177bool amd_iommu_iotlb_sup __read_mostly = true;
178
179u32 amd_iommu_max_pasid __read_mostly = ~0;
180
181bool amd_iommu_v2_present __read_mostly;
182static bool amd_iommu_pc_present __read_mostly;
183
184bool amd_iommu_force_isolation __read_mostly;
185
186
187
188
189
190
191
192struct dev_table_entry *amd_iommu_dev_table;
193
194
195
196
197static struct dev_table_entry *old_dev_tbl_cpy;
198
199
200
201
202
203
204u16 *amd_iommu_alias_table;
205
206
207
208
209
210struct amd_iommu **amd_iommu_rlookup_table;
211EXPORT_SYMBOL(amd_iommu_rlookup_table);
212
213
214
215
216
217struct irq_remap_table **irq_lookup_table;
218
219
220
221
222
223unsigned long *amd_iommu_pd_alloc_bitmap;
224
225static u32 dev_table_size;
226static u32 alias_table_size;
227static u32 rlookup_table_size;
228
229enum iommu_init_state {
230 IOMMU_START_STATE,
231 IOMMU_IVRS_DETECTED,
232 IOMMU_ACPI_FINISHED,
233 IOMMU_ENABLED,
234 IOMMU_PCI_INIT,
235 IOMMU_INTERRUPTS_EN,
236 IOMMU_DMA_OPS,
237 IOMMU_INITIALIZED,
238 IOMMU_NOT_FOUND,
239 IOMMU_INIT_ERROR,
240 IOMMU_CMDLINE_DISABLED,
241};
242
243
244#define EARLY_MAP_SIZE 4
245static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
246static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
247static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
248
249static int __initdata early_ioapic_map_size;
250static int __initdata early_hpet_map_size;
251static int __initdata early_acpihid_map_size;
252
253static bool __initdata cmdline_maps;
254
255static enum iommu_init_state init_state = IOMMU_START_STATE;
256
257static int amd_iommu_enable_interrupts(void);
258static int __init iommu_go_to_state(enum iommu_init_state state);
259static void init_device_table_dma(void);
260static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
261 u8 fxn, u64 *value, bool is_write);
262
263static bool amd_iommu_pre_enabled = true;
264
265static u32 amd_iommu_ivinfo __initdata;
266
267bool translation_pre_enabled(struct amd_iommu *iommu)
268{
269 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
270}
271EXPORT_SYMBOL(translation_pre_enabled);
272
273static void clear_translation_pre_enabled(struct amd_iommu *iommu)
274{
275 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
276}
277
278static void init_translation_status(struct amd_iommu *iommu)
279{
280 u64 ctrl;
281
282 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
283 if (ctrl & (1<<CONTROL_IOMMU_EN))
284 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
285}
286
287static inline void update_last_devid(u16 devid)
288{
289 if (devid > amd_iommu_last_bdf)
290 amd_iommu_last_bdf = devid;
291}
292
293static inline unsigned long tbl_size(int entry_size)
294{
295 unsigned shift = PAGE_SHIFT +
296 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
297
298 return 1UL << shift;
299}
300
301int amd_iommu_get_num_iommus(void)
302{
303 return amd_iommus_present;
304}
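
/*
 * For IVHD types 0x11 and 0x40 the extended feature register value is
 * part of the ACPI table itself.  Use it to seed iommu->features early,
 * but only when the IVRS IVinfo field advertises EFR support
 * (IOMMU_IVINFO_EFRSUP); otherwise the value is read from MMIO later in
 * late_iommu_features_init().
 */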
311static void __init early_iommu_features_init(struct amd_iommu *iommu,
312 struct ivhd_header *h)
313{
314 if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
315 iommu->features = h->efr_reg;
316}
317
318
319
320static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
321{
322 u32 val;
323
324 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
325 pci_read_config_dword(iommu->dev, 0xfc, &val);
326 return val;
327}
328
329static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
330{
331 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
332 pci_write_config_dword(iommu->dev, 0xfc, val);
333 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
334}
335
336static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
337{
338 u32 val;
339
340 pci_write_config_dword(iommu->dev, 0xf0, address);
341 pci_read_config_dword(iommu->dev, 0xf4, &val);
342 return val;
343}
344
345static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
346{
347 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
348 pci_write_config_dword(iommu->dev, 0xf4, val);
349}
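
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions program the hardware registers of the IOMMUs in the
 * system with the data gathered from the ACPI table.
 *
 ****************************************************************************/

/*
 * Program the exclusion-range base/limit registers.  DMA addresses in
 * this range bypass translation.  The function is a no-op when no
 * exclusion range has been set up for this IOMMU.
 */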
364static void iommu_set_exclusion_range(struct amd_iommu *iommu)
365{
366 u64 start = iommu->exclusion_start & PAGE_MASK;
367 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
368 u64 entry;
369
370 if (!iommu->exclusion_start)
371 return;
372
373 entry = start | MMIO_EXCL_ENABLE_MASK;
374 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
375 &entry, sizeof(entry));
376
377 entry = limit;
378 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
379 &entry, sizeof(entry));
380}
381
382static void iommu_set_cwwb_range(struct amd_iommu *iommu)
383{
384 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
385 u64 entry = start & PM_ADDR_MASK;
386
387 if (!iommu_feature(iommu, FEATURE_SNP))
388 return;
389
390
391
392
393
394 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
395 &entry, sizeof(entry));
396
397
398
399
400
401 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
402 &entry, sizeof(entry));
403}
404
405
406static void iommu_set_device_table(struct amd_iommu *iommu)
407{
408 u64 entry;
409
410 BUG_ON(iommu->mmio_base == NULL);
411
412 entry = iommu_virt_to_phys(amd_iommu_dev_table);
413 entry |= (dev_table_size >> 12) - 1;
414 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
415 &entry, sizeof(entry));
416}
417
418
419static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
420{
421 u64 ctrl;
422
423 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
424 ctrl |= (1ULL << bit);
425 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
426}
427
428static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
429{
430 u64 ctrl;
431
432 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
433 ctrl &= ~(1ULL << bit);
434 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
435}
436
437static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
438{
439 u64 ctrl;
440
441 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
442 ctrl &= ~CTRL_INV_TO_MASK;
443 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
444 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
445}
446
447
448static void iommu_enable(struct amd_iommu *iommu)
449{
450 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
451}
452
453static void iommu_disable(struct amd_iommu *iommu)
454{
455 if (!iommu->mmio_base)
456 return;
457
458
459 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
460
461
462 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
463 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
464
465
466 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
467 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
468
469
470 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
471}
472
473
474
475
476
477static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
478{
479 if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Cannot reserve memory region %llx-%llx for mmio\n",
		       address, end);
482 pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
483 return NULL;
484 }
485
486 return (u8 __iomem *)ioremap(address, end);
487}
488
489static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
490{
491 if (iommu->mmio_base)
492 iounmap(iommu->mmio_base);
493 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
494}
495
496static inline u32 get_ivhd_header_size(struct ivhd_header *h)
497{
498 u32 size = 0;
499
500 switch (h->type) {
501 case 0x10:
502 size = 24;
503 break;
504 case 0x11:
505 case 0x40:
506 size = 40;
507 break;
508 }
509 return size;
510}
511
512
513
514
515
516
517
518
519
520
521
522
523
524static inline int ivhd_entry_length(u8 *ivhd)
525{
526 u32 type = ((struct ivhd_entry *)ivhd)->type;
527
528 if (type < 0x80) {
529 return 0x04 << (*ivhd >> 6);
530 } else if (type == IVHD_DEV_ACPI_HID) {
531
532 return *((u8 *)ivhd + 21) + 22;
533 }
534 return 0;
535}
536
537
538
539
540
541static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
542{
543 u8 *p = (void *)h, *end = (void *)h;
544 struct ivhd_entry *dev;
545
546 u32 ivhd_size = get_ivhd_header_size(h);
547
548 if (!ivhd_size) {
549 pr_err("Unsupported IVHD type %#x\n", h->type);
550 return -EINVAL;
551 }
552
553 p += ivhd_size;
554 end += h->length;
555
556 while (p < end) {
557 dev = (struct ivhd_entry *)p;
558 switch (dev->type) {
559 case IVHD_DEV_ALL:
560
561 update_last_devid(0xffff);
562 break;
563 case IVHD_DEV_SELECT:
564 case IVHD_DEV_RANGE_END:
565 case IVHD_DEV_ALIAS:
566 case IVHD_DEV_EXT_SELECT:
567
568 update_last_devid(dev->devid);
569 break;
570 default:
571 break;
572 }
573 p += ivhd_entry_length(p);
574 }
575
576 WARN_ON(p != end);
577
578 return 0;
579}
580
581static int __init check_ivrs_checksum(struct acpi_table_header *table)
582{
583 int i;
584 u8 checksum = 0, *p = (u8 *)table;
585
586 for (i = 0; i < table->length; ++i)
587 checksum += p[i];
588 if (checksum != 0) {
589
590 pr_err(FW_BUG "IVRS invalid checksum\n");
591 return -ENODEV;
592 }
593
594 return 0;
595}
596
597
598
599
600
601
602static int __init find_last_devid_acpi(struct acpi_table_header *table)
603{
604 u8 *p = (u8 *)table, *end = (u8 *)table;
605 struct ivhd_header *h;
606
607 p += IVRS_HEADER_LENGTH;
608
609 end += table->length;
610 while (p < end) {
611 h = (struct ivhd_header *)p;
612 if (h->type == amd_iommu_target_ivhd_type) {
613 int ret = find_last_devid_from_ivhd(h);
614
615 if (ret)
616 return ret;
617 }
618 p += h->length;
619 }
620 WARN_ON(p != end);
621
622 return 0;
623}
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639static int __init alloc_command_buffer(struct amd_iommu *iommu)
640{
641 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
642 get_order(CMD_BUFFER_SIZE));
643
644 return iommu->cmd_buf ? 0 : -ENOMEM;
645}
646
647
648
649
650
651void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
652{
653 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
654
655 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
656 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
657 iommu->cmd_buf_head = 0;
658 iommu->cmd_buf_tail = 0;
659
660 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
661}
662
663
664
665
666
667static void iommu_enable_command_buffer(struct amd_iommu *iommu)
668{
669 u64 entry;
670
671 BUG_ON(iommu->cmd_buf == NULL);
672
673 entry = iommu_virt_to_phys(iommu->cmd_buf);
674 entry |= MMIO_CMD_SIZE_512;
675
676 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
677 &entry, sizeof(entry));
678
679 amd_iommu_reset_cmd_buffer(iommu);
680}
681
682
683
684
685static void iommu_disable_command_buffer(struct amd_iommu *iommu)
686{
687 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
688}
689
690static void __init free_command_buffer(struct amd_iommu *iommu)
691{
692 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
693}
694
695static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
696 gfp_t gfp, size_t size)
697{
698 int order = get_order(size);
699 void *buf = (void *)__get_free_pages(gfp, order);
700
701 if (buf &&
702 iommu_feature(iommu, FEATURE_SNP) &&
703 set_memory_4k((unsigned long)buf, (1 << order))) {
704 free_pages((unsigned long)buf, order);
705 buf = NULL;
706 }
707
708 return buf;
709}
710
711
712static int __init alloc_event_buffer(struct amd_iommu *iommu)
713{
714 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
715 EVT_BUFFER_SIZE);
716
717 return iommu->evt_buf ? 0 : -ENOMEM;
718}
719
720static void iommu_enable_event_buffer(struct amd_iommu *iommu)
721{
722 u64 entry;
723
724 BUG_ON(iommu->evt_buf == NULL);
725
726 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
727
728 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
729 &entry, sizeof(entry));
730
731
732 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
733 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
734
735 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
736}
737
738
739
740
741static void iommu_disable_event_buffer(struct amd_iommu *iommu)
742{
743 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
744}
745
746static void __init free_event_buffer(struct amd_iommu *iommu)
747{
748 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
749}
750
751
752static int __init alloc_ppr_log(struct amd_iommu *iommu)
753{
754 iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
755 PPR_LOG_SIZE);
756
757 return iommu->ppr_log ? 0 : -ENOMEM;
758}
759
760static void iommu_enable_ppr_log(struct amd_iommu *iommu)
761{
762 u64 entry;
763
764 if (iommu->ppr_log == NULL)
765 return;
766
767 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
768
769 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
770 &entry, sizeof(entry));
771
772
773 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
774 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
775
776 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
777 iommu_feature_enable(iommu, CONTROL_PPR_EN);
778}
779
780static void __init free_ppr_log(struct amd_iommu *iommu)
781{
782 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
783}
784
785static void free_ga_log(struct amd_iommu *iommu)
786{
787#ifdef CONFIG_IRQ_REMAP
788 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
789 free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
790#endif
791}
792
793static int iommu_ga_log_enable(struct amd_iommu *iommu)
794{
795#ifdef CONFIG_IRQ_REMAP
796 u32 status, i;
797
798 if (!iommu->ga_log)
799 return -EINVAL;
800
801 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
802
803
804 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
805 return 0;
806
807 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
808 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
809
810 for (i = 0; i < LOOP_TIMEOUT; ++i) {
811 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
812 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
813 break;
814 }
815
816 if (i >= LOOP_TIMEOUT)
817 return -EINVAL;
818#endif
819 return 0;
820}
821
822#ifdef CONFIG_IRQ_REMAP
823static int iommu_init_ga_log(struct amd_iommu *iommu)
824{
825 u64 entry;
826
827 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
828 return 0;
829
830 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
831 get_order(GA_LOG_SIZE));
832 if (!iommu->ga_log)
833 goto err_out;
834
835 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
836 get_order(8));
837 if (!iommu->ga_log_tail)
838 goto err_out;
839
840 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
841 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
842 &entry, sizeof(entry));
843 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
844 (BIT_ULL(52)-1)) & ~7ULL;
845 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
846 &entry, sizeof(entry));
847 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
848 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
849
850 return 0;
851err_out:
852 free_ga_log(iommu);
853 return -EINVAL;
854}
855#endif
856
857static int iommu_init_ga(struct amd_iommu *iommu)
858{
859 int ret = 0;
860
861#ifdef CONFIG_IRQ_REMAP
862
863
864
865 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
866 !iommu_feature(iommu, FEATURE_GAM_VAPIC))
867 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
868
869 ret = iommu_init_ga_log(iommu);
870#endif
871
872 return ret;
873}
874
875static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
876{
877 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
878
879 return iommu->cmd_sem ? 0 : -ENOMEM;
880}
881
882static void __init free_cwwb_sem(struct amd_iommu *iommu)
883{
884 if (iommu->cmd_sem)
885 free_page((unsigned long)iommu->cmd_sem);
886}
887
888static void iommu_enable_xt(struct amd_iommu *iommu)
889{
890#ifdef CONFIG_IRQ_REMAP
891
892
893
894
895 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
896 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
897 iommu_feature_enable(iommu, CONTROL_XT_EN);
898#endif
899}
900
901static void iommu_enable_gt(struct amd_iommu *iommu)
902{
903 if (!iommu_feature(iommu, FEATURE_GT))
904 return;
905
906 iommu_feature_enable(iommu, CONTROL_GT_EN);
907}
908
909
910static void set_dev_entry_bit(u16 devid, u8 bit)
911{
912 int i = (bit >> 6) & 0x03;
913 int _bit = bit & 0x3f;
914
915 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
916}
917
918static int get_dev_entry_bit(u16 devid, u8 bit)
919{
920 int i = (bit >> 6) & 0x03;
921 int _bit = bit & 0x3f;
922
923 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
924}
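
/*
 * In a kdump kernel the IOMMUs may already be enabled by the previous
 * kernel.  Copy the old device table before re-pointing the hardware to
 * a new one, so that devices still doing DMA keep working translations
 * and interrupt-remapping settings.  Returns false when the old table
 * cannot be trusted (located above 4GB, unexpected size, or different
 * tables across IOMMUs).
 */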
927static bool copy_device_table(void)
928{
929 u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
930 struct dev_table_entry *old_devtb = NULL;
931 u32 lo, hi, devid, old_devtb_size;
932 phys_addr_t old_devtb_phys;
933 struct amd_iommu *iommu;
934 u16 dom_id, dte_v, irq_v;
935 gfp_t gfp_flag;
936 u64 tmp;
937
938 if (!amd_iommu_pre_enabled)
939 return false;
940
941 pr_warn("Translation is already enabled - trying to copy translation structures\n");
942 for_each_iommu(iommu) {
943
944 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
945 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
946 entry = (((u64) hi) << 32) + lo;
947 if (last_entry && last_entry != entry) {
948 pr_err("IOMMU:%d should use the same dev table as others!\n",
949 iommu->index);
950 return false;
951 }
952 last_entry = entry;
953
954 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
955 if (old_devtb_size != dev_table_size) {
956 pr_err("The device table size of IOMMU:%d is not expected!\n",
957 iommu->index);
958 return false;
959 }
960 }
961
962
963
964
965
966
967 old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
968
969 if (old_devtb_phys >= 0x100000000ULL) {
970 pr_err("The address of old device table is above 4G, not trustworthy!\n");
971 return false;
972 }
973 old_devtb = (sme_active() && is_kdump_kernel())
974 ? (__force void *)ioremap_encrypted(old_devtb_phys,
975 dev_table_size)
976 : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
977
978 if (!old_devtb)
979 return false;
980
981 gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
982 old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
983 get_order(dev_table_size));
984 if (old_dev_tbl_cpy == NULL) {
985 pr_err("Failed to allocate memory for copying old device table!\n");
986 return false;
987 }
988
989 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
990 old_dev_tbl_cpy[devid] = old_devtb[devid];
991 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
992 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
993
994 if (dte_v && dom_id) {
995 old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
996 old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
997 __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
998
999 if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
1000 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1001 tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1002 old_dev_tbl_cpy[devid].data[1] &= ~tmp;
1003 tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
1004 tmp |= DTE_FLAG_GV;
1005 old_dev_tbl_cpy[devid].data[0] &= ~tmp;
1006 }
1007 }
1008
1009 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
1010 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
1011 int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
1012 if (irq_v && (int_ctl || int_tab_len)) {
1013 if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
1014 (int_tab_len != DTE_INTTABLEN)) {
				pr_err("Wrong old irq remapping flag for devid %#x\n", devid);
1016 return false;
1017 }
1018
1019 old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
1020 }
1021 }
1022 memunmap(old_devtb);
1023
1024 return true;
1025}
1026
1027void amd_iommu_apply_erratum_63(u16 devid)
1028{
1029 int sysmgt;
1030
1031 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
1032 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
1033
1034 if (sysmgt == 0x01)
1035 set_dev_entry_bit(devid, DEV_ENTRY_IW);
1036}
1037
1038
1039static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
1040{
1041 amd_iommu_rlookup_table[devid] = iommu;
1042}
1043
1044
1045
1046
1047
1048static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1049 u16 devid, u32 flags, u32 ext_flags)
1050{
1051 if (flags & ACPI_DEVFLAG_INITPASS)
1052 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
1053 if (flags & ACPI_DEVFLAG_EXTINT)
1054 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
1055 if (flags & ACPI_DEVFLAG_NMI)
1056 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
1057 if (flags & ACPI_DEVFLAG_SYSMGT1)
1058 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
1059 if (flags & ACPI_DEVFLAG_SYSMGT2)
1060 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
1061 if (flags & ACPI_DEVFLAG_LINT0)
1062 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
1063 if (flags & ACPI_DEVFLAG_LINT1)
1064 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
1065
1066 amd_iommu_apply_erratum_63(devid);
1067
1068 set_iommu_for_device(iommu, devid);
1069}
1070
1071int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
1072{
1073 struct devid_map *entry;
1074 struct list_head *list;
1075
1076 if (type == IVHD_SPECIAL_IOAPIC)
1077 list = &ioapic_map;
1078 else if (type == IVHD_SPECIAL_HPET)
1079 list = &hpet_map;
1080 else
1081 return -EINVAL;
1082
1083 list_for_each_entry(entry, list, list) {
1084 if (!(entry->id == id && entry->cmd_line))
1085 continue;
1086
1087 pr_info("Command-line override present for %s id %d - ignoring\n",
1088 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1089
1090 *devid = entry->devid;
1091
1092 return 0;
1093 }
1094
1095 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1096 if (!entry)
1097 return -ENOMEM;
1098
1099 entry->id = id;
1100 entry->devid = *devid;
1101 entry->cmd_line = cmd_line;
1102
1103 list_add_tail(&entry->list, list);
1104
1105 return 0;
1106}
1107
1108static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
1109 bool cmd_line)
1110{
1111 struct acpihid_map_entry *entry;
1112 struct list_head *list = &acpihid_map;
1113
1114 list_for_each_entry(entry, list, list) {
1115 if (strcmp(entry->hid, hid) ||
1116 (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1117 !entry->cmd_line)
1118 continue;
1119
1120 pr_info("Command-line override for hid:%s uid:%s\n",
1121 hid, uid);
1122 *devid = entry->devid;
1123 return 0;
1124 }
1125
1126 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1127 if (!entry)
1128 return -ENOMEM;
1129
1130 memcpy(entry->uid, uid, strlen(uid));
1131 memcpy(entry->hid, hid, strlen(hid));
1132 entry->devid = *devid;
1133 entry->cmd_line = cmd_line;
1134 entry->root_devid = (entry->devid & (~0x7));
1135
1136 pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
1137 entry->cmd_line ? "cmd" : "ivrs",
1138 entry->hid, entry->uid, entry->root_devid);
1139
1140 list_add_tail(&entry->list, list);
1141 return 0;
1142}
1143
1144static int __init add_early_maps(void)
1145{
1146 int i, ret;
1147
1148 for (i = 0; i < early_ioapic_map_size; ++i) {
1149 ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1150 early_ioapic_map[i].id,
1151 &early_ioapic_map[i].devid,
1152 early_ioapic_map[i].cmd_line);
1153 if (ret)
1154 return ret;
1155 }
1156
1157 for (i = 0; i < early_hpet_map_size; ++i) {
1158 ret = add_special_device(IVHD_SPECIAL_HPET,
1159 early_hpet_map[i].id,
1160 &early_hpet_map[i].devid,
1161 early_hpet_map[i].cmd_line);
1162 if (ret)
1163 return ret;
1164 }
1165
1166 for (i = 0; i < early_acpihid_map_size; ++i) {
1167 ret = add_acpi_hid_device(early_acpihid_map[i].hid,
1168 early_acpihid_map[i].uid,
1169 &early_acpihid_map[i].devid,
1170 early_acpihid_map[i].cmd_line);
1171 if (ret)
1172 return ret;
1173 }
1174
1175 return 0;
1176}
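
/*
 * Walk the device entries of one IVHD block.  The ACPI device flags are
 * written into the device table for every devid covered by an entry,
 * alias relationships are recorded in amd_iommu_alias_table, and
 * IOAPIC/HPET/ACPI-HID special devices are registered for later lookup.
 */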
1182static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1183 struct ivhd_header *h)
1184{
1185 u8 *p = (u8 *)h;
1186 u8 *end = p, flags = 0;
1187 u16 devid = 0, devid_start = 0, devid_to = 0;
1188 u32 dev_i, ext_flags = 0;
1189 bool alias = false;
1190 struct ivhd_entry *e;
1191 u32 ivhd_size;
1192 int ret;
1193
1194
1195 ret = add_early_maps();
1196 if (ret)
1197 return ret;
1198
1199 amd_iommu_apply_ivrs_quirks();
1200
1201
1202
1203
1204 iommu->acpi_flags = h->flags;
1205
1206
1207
1208
1209 ivhd_size = get_ivhd_header_size(h);
1210 if (!ivhd_size) {
1211 pr_err("Unsupported IVHD type %#x\n", h->type);
1212 return -EINVAL;
1213 }
1214
1215 p += ivhd_size;
1216
1217 end += h->length;
1218
1219
1220 while (p < end) {
1221 e = (struct ivhd_entry *)p;
1222 switch (e->type) {
1223 case IVHD_DEV_ALL:
1224
1225 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
1226
1227 for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
1228 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1229 break;
1230 case IVHD_DEV_SELECT:
1231
1232 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
1233 "flags: %02x\n",
1234 PCI_BUS_NUM(e->devid),
1235 PCI_SLOT(e->devid),
1236 PCI_FUNC(e->devid),
1237 e->flags);
1238
1239 devid = e->devid;
1240 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1241 break;
1242 case IVHD_DEV_SELECT_RANGE_START:
1243
1244 DUMP_printk(" DEV_SELECT_RANGE_START\t "
1245 "devid: %02x:%02x.%x flags: %02x\n",
1246 PCI_BUS_NUM(e->devid),
1247 PCI_SLOT(e->devid),
1248 PCI_FUNC(e->devid),
1249 e->flags);
1250
1251 devid_start = e->devid;
1252 flags = e->flags;
1253 ext_flags = 0;
1254 alias = false;
1255 break;
1256 case IVHD_DEV_ALIAS:
1257
1258 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
1259 "flags: %02x devid_to: %02x:%02x.%x\n",
1260 PCI_BUS_NUM(e->devid),
1261 PCI_SLOT(e->devid),
1262 PCI_FUNC(e->devid),
1263 e->flags,
1264 PCI_BUS_NUM(e->ext >> 8),
1265 PCI_SLOT(e->ext >> 8),
1266 PCI_FUNC(e->ext >> 8));
1267
1268 devid = e->devid;
1269 devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1271 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1272 amd_iommu_alias_table[devid] = devid_to;
1273 break;
1274 case IVHD_DEV_ALIAS_RANGE:
1275
1276 DUMP_printk(" DEV_ALIAS_RANGE\t\t "
1277 "devid: %02x:%02x.%x flags: %02x "
1278 "devid_to: %02x:%02x.%x\n",
1279 PCI_BUS_NUM(e->devid),
1280 PCI_SLOT(e->devid),
1281 PCI_FUNC(e->devid),
1282 e->flags,
1283 PCI_BUS_NUM(e->ext >> 8),
1284 PCI_SLOT(e->ext >> 8),
1285 PCI_FUNC(e->ext >> 8));
1286
1287 devid_start = e->devid;
1288 flags = e->flags;
1289 devid_to = e->ext >> 8;
1290 ext_flags = 0;
1291 alias = true;
1292 break;
1293 case IVHD_DEV_EXT_SELECT:
1294
1295 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
1296 "flags: %02x ext: %08x\n",
1297 PCI_BUS_NUM(e->devid),
1298 PCI_SLOT(e->devid),
1299 PCI_FUNC(e->devid),
1300 e->flags, e->ext);
1301
1302 devid = e->devid;
1303 set_dev_entry_from_acpi(iommu, devid, e->flags,
1304 e->ext);
1305 break;
1306 case IVHD_DEV_EXT_SELECT_RANGE:
1307
1308 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
1309 "%02x:%02x.%x flags: %02x ext: %08x\n",
1310 PCI_BUS_NUM(e->devid),
1311 PCI_SLOT(e->devid),
1312 PCI_FUNC(e->devid),
1313 e->flags, e->ext);
1314
1315 devid_start = e->devid;
1316 flags = e->flags;
1317 ext_flags = e->ext;
1318 alias = false;
1319 break;
1320 case IVHD_DEV_RANGE_END:
1321
1322 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
1323 PCI_BUS_NUM(e->devid),
1324 PCI_SLOT(e->devid),
1325 PCI_FUNC(e->devid));
1326
1327 devid = e->devid;
1328 for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
1329 if (alias) {
1330 amd_iommu_alias_table[dev_i] = devid_to;
1331 set_dev_entry_from_acpi(iommu,
1332 devid_to, flags, ext_flags);
1333 }
1334 set_dev_entry_from_acpi(iommu, dev_i,
1335 flags, ext_flags);
1336 }
1337 break;
1338 case IVHD_DEV_SPECIAL: {
1339 u8 handle, type;
1340 const char *var;
1341 u16 devid;
1342 int ret;
1343
1344 handle = e->ext & 0xff;
1345 devid = (e->ext >> 8) & 0xffff;
1346 type = (e->ext >> 24) & 0xff;
1347
1348 if (type == IVHD_SPECIAL_IOAPIC)
1349 var = "IOAPIC";
1350 else if (type == IVHD_SPECIAL_HPET)
1351 var = "HPET";
1352 else
1353 var = "UNKNOWN";
1354
1355 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
1356 var, (int)handle,
1357 PCI_BUS_NUM(devid),
1358 PCI_SLOT(devid),
1359 PCI_FUNC(devid));
1360
1361 ret = add_special_device(type, handle, &devid, false);
1362 if (ret)
1363 return ret;
1364
1365
1366
1367
1368
1369
1370 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1371
1372 break;
1373 }
1374 case IVHD_DEV_ACPI_HID: {
1375 u16 devid;
1376 u8 hid[ACPIHID_HID_LEN];
1377 u8 uid[ACPIHID_UID_LEN];
1378 int ret;
1379
1380 if (h->type != 0x40) {
1381 pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1382 e->type);
1383 break;
1384 }
1385
1386 memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
1387 hid[ACPIHID_HID_LEN - 1] = '\0';
1388
1389 if (!(*hid)) {
1390 pr_err(FW_BUG "Invalid HID.\n");
1391 break;
1392 }
1393
1394 uid[0] = '\0';
1395 switch (e->uidf) {
1396 case UID_NOT_PRESENT:
1397
1398 if (e->uidl != 0)
1399 pr_warn(FW_BUG "Invalid UID length.\n");
1400
1401 break;
1402 case UID_IS_INTEGER:
1403
1404 sprintf(uid, "%d", e->uid);
1405
1406 break;
1407 case UID_IS_CHARACTER:
1408
1409 memcpy(uid, &e->uid, e->uidl);
1410 uid[e->uidl] = '\0';
1411
1412 break;
1413 default:
1414 break;
1415 }
1416
1417 devid = e->devid;
1418 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
1419 hid, uid,
1420 PCI_BUS_NUM(devid),
1421 PCI_SLOT(devid),
1422 PCI_FUNC(devid));
1423
1424 flags = e->flags;
1425
1426 ret = add_acpi_hid_device(hid, uid, &devid, false);
1427 if (ret)
1428 return ret;
1429
1430
1431
1432
1433
1434
1435 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1436
1437 break;
1438 }
1439 default:
1440 break;
1441 }
1442
1443 p += ivhd_entry_length(p);
1444 }
1445
1446 return 0;
1447}
1448
1449static void __init free_iommu_one(struct amd_iommu *iommu)
1450{
1451 free_cwwb_sem(iommu);
1452 free_command_buffer(iommu);
1453 free_event_buffer(iommu);
1454 free_ppr_log(iommu);
1455 free_ga_log(iommu);
1456 iommu_unmap_mmio_space(iommu);
1457}
1458
1459static void __init free_iommu_all(void)
1460{
1461 struct amd_iommu *iommu, *next;
1462
1463 for_each_iommu_safe(iommu, next) {
1464 list_del(&iommu->list);
1465 free_iommu_one(iommu);
1466 kfree(iommu);
1467 }
1468}
1469
1470
1471
1472
1473
1474
1475
1476static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1477{
1478 u32 value;
1479
1480 if ((boot_cpu_data.x86 != 0x15) ||
1481 (boot_cpu_data.x86_model < 0x10) ||
1482 (boot_cpu_data.x86_model > 0x1f))
1483 return;
1484
1485 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1486 pci_read_config_dword(iommu->dev, 0xf4, &value);
1487
1488 if (value & BIT(2))
1489 return;
1490
1491
1492 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1493
1494 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1495 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
1496
1497
1498 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1499}
1500
1501
1502
1503
1504
1505
1506
1507static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1508{
1509 u32 value;
1510
1511 if ((boot_cpu_data.x86 != 0x15) ||
1512 (boot_cpu_data.x86_model < 0x30) ||
1513 (boot_cpu_data.x86_model > 0x3f))
1514 return;
1515
1516
1517 value = iommu_read_l2(iommu, 0x47);
1518
1519 if (value & BIT(0))
1520 return;
1521
1522
1523 iommu_write_l2(iommu, 0x47, value | BIT(0));
1524
1525 pci_info(iommu->dev, "Applying ATS write check workaround\n");
1526}
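
/*
 * Initialise one IOMMU from its IVHD block: add it to the global list,
 * map its MMIO space, allocate the command/event buffers and the
 * completion-wait semaphore, record whether translation was left
 * enabled by a previous kernel, and finally parse the device entries
 * via init_iommu_from_acpi().
 */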
1533static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1534{
1535 int ret;
1536
1537 raw_spin_lock_init(&iommu->lock);
1538 iommu->cmd_sem_val = 0;
1539
1540
1541 list_add_tail(&iommu->list, &amd_iommu_list);
1542 iommu->index = amd_iommus_present++;
1543
1544 if (unlikely(iommu->index >= MAX_IOMMUS)) {
1545 WARN(1, "System has more IOMMUs than supported by this driver\n");
1546 return -ENOSYS;
1547 }
1548
1549
1550 amd_iommus[iommu->index] = iommu;
1551
1552
1553
1554
1555 iommu->devid = h->devid;
1556 iommu->cap_ptr = h->cap_ptr;
1557 iommu->pci_seg = h->pci_seg;
1558 iommu->mmio_phys = h->mmio_phys;
1559
1560 switch (h->type) {
1561 case 0x10:
1562
1563 if ((h->efr_attr != 0) &&
1564 ((h->efr_attr & (0xF << 13)) != 0) &&
1565 ((h->efr_attr & (0x3F << 17)) != 0))
1566 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1567 else
1568 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1569
1570
1571
1572
1573
1574
1575 if (!boot_cpu_has(X86_FEATURE_CX16) ||
1576 ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1577 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1578 break;
1579 case 0x11:
1580 case 0x40:
1581 if (h->efr_reg & (1 << 9))
1582 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1583 else
1584 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1585
1586
1587
1588
1589
1590
1591 if (!boot_cpu_has(X86_FEATURE_CX16) ||
1592 ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
1593 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1594 break;
1595 }
1596
1597 if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
1598 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
1599
1600 early_iommu_features_init(iommu, h);
1601
1602 break;
1603 default:
1604 return -EINVAL;
1605 }
1606
1607 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1608 iommu->mmio_phys_end);
1609 if (!iommu->mmio_base)
1610 return -ENOMEM;
1611
1612 if (alloc_cwwb_sem(iommu))
1613 return -ENOMEM;
1614
1615 if (alloc_command_buffer(iommu))
1616 return -ENOMEM;
1617
1618 if (alloc_event_buffer(iommu))
1619 return -ENOMEM;
1620
1621 iommu->int_enabled = false;
1622
1623 init_translation_status(iommu);
1624 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1625 iommu_disable(iommu);
1626 clear_translation_pre_enabled(iommu);
1627 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1628 iommu->index);
1629 }
1630 if (amd_iommu_pre_enabled)
1631 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1632
1633 ret = init_iommu_from_acpi(iommu, h);
1634 if (ret)
1635 return ret;
1636
1637 if (amd_iommu_irq_remap) {
1638 ret = amd_iommu_create_irq_domain(iommu);
1639 if (ret)
1640 return ret;
1641 }
1642
1643
1644
1645
1646
1647 amd_iommu_rlookup_table[iommu->devid] = NULL;
1648
1649 return 0;
1650}
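
/*
 * An IVRS table may describe the same IOMMU with several IVHD types
 * (0x10, 0x11, 0x40).  Walk the blocks that belong to the first IOMMU's
 * devid and return the highest type this driver supports; only blocks
 * of that type are parsed afterwards.
 */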
1658static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1659{
1660 u8 *base = (u8 *)ivrs;
1661 struct ivhd_header *ivhd = (struct ivhd_header *)
1662 (base + IVRS_HEADER_LENGTH);
1663 u8 last_type = ivhd->type;
1664 u16 devid = ivhd->devid;
1665
1666 while (((u8 *)ivhd - base < ivrs->length) &&
1667 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1668 u8 *p = (u8 *) ivhd;
1669
1670 if (ivhd->devid == devid)
1671 last_type = ivhd->type;
1672 ivhd = (struct ivhd_header *)(p + ivhd->length);
1673 }
1674
1675 return last_type;
1676}
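
/*
 * Iterate over all IVHD blocks of the selected type in the IVRS table
 * and initialise one struct amd_iommu for each of them.
 */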
1682static int __init init_iommu_all(struct acpi_table_header *table)
1683{
1684 u8 *p = (u8 *)table, *end = (u8 *)table;
1685 struct ivhd_header *h;
1686 struct amd_iommu *iommu;
1687 int ret;
1688
1689 end += table->length;
1690 p += IVRS_HEADER_LENGTH;
1691
1692 while (p < end) {
1693 h = (struct ivhd_header *)p;
1694 if (*p == amd_iommu_target_ivhd_type) {
1695
1696 DUMP_printk("device: %02x:%02x.%01x cap: %04x "
1697 "seg: %d flags: %01x info %04x\n",
1698 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
1699 PCI_FUNC(h->devid), h->cap_ptr,
1700 h->pci_seg, h->flags, h->info);
1701 DUMP_printk(" mmio-addr: %016llx\n",
1702 h->mmio_phys);
1703
1704 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1705 if (iommu == NULL)
1706 return -ENOMEM;
1707
1708 ret = init_iommu_one(iommu, h);
1709 if (ret)
1710 return ret;
1711 }
1712 p += h->length;
1713
1714 }
1715 WARN_ON(p != end);
1716
1717 return 0;
1718}
1719
1720static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
1721{
1722 int retry;
1723 struct pci_dev *pdev = iommu->dev;
1724 u64 val = 0xabcd, val2 = 0, save_reg, save_src;
1725
1726 if (!iommu_feature(iommu, FEATURE_PC))
1727 return;
1728
1729 amd_iommu_pc_present = true;
1730
1731
1732 if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
1733 iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
1734 goto pc_false;
1735
1736
1737
1738
1739
1740
1741
1742
1743 val = 20;
1744 if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
1745 goto pc_false;
1746
1747
1748 val = 0xabcd;
1749 for (retry = 5; retry; retry--) {
1750 if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
1751 iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
1752 val2)
1753 break;
1754
1755
1756 msleep(20);
1757 }
1758
1759
1760 if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
1761 iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
1762 goto pc_false;
1763
1764 if (val != val2)
1765 goto pc_false;
1766
1767 pci_info(pdev, "IOMMU performance counters supported\n");
1768
1769 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1770 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1771 iommu->max_counters = (u8) ((val >> 7) & 0xf);
1772
1773 return;
1774
1775pc_false:
1776 pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
1777 amd_iommu_pc_present = false;
1778 return;
1779}
1780
1781static ssize_t amd_iommu_show_cap(struct device *dev,
1782 struct device_attribute *attr,
1783 char *buf)
1784{
1785 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1786 return sprintf(buf, "%x\n", iommu->cap);
1787}
1788static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1789
1790static ssize_t amd_iommu_show_features(struct device *dev,
1791 struct device_attribute *attr,
1792 char *buf)
1793{
1794 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1795 return sprintf(buf, "%llx\n", iommu->features);
1796}
1797static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1798
1799static struct attribute *amd_iommu_attrs[] = {
1800 &dev_attr_cap.attr,
1801 &dev_attr_features.attr,
1802 NULL,
1803};
1804
1805static struct attribute_group amd_iommu_group = {
1806 .name = "amd-iommu",
1807 .attrs = amd_iommu_attrs,
1808};
1809
1810static const struct attribute_group *amd_iommu_groups[] = {
1811 &amd_iommu_group,
1812 NULL,
1813};
1814
1815
1816
1817
1818
1819
1820static void __init late_iommu_features_init(struct amd_iommu *iommu)
1821{
1822 u64 features;
1823
1824 if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
1825 return;
1826
1827
1828 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
1829
1830 if (!iommu->features) {
1831 iommu->features = features;
1832 return;
1833 }
1834
1835
1836
1837
1838
1839 if (features != iommu->features)
		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
			features, iommu->features);
1842}
1843
1844static int __init iommu_init_pci(struct amd_iommu *iommu)
1845{
1846 int cap_ptr = iommu->cap_ptr;
1847 int ret;
1848
1849 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1850 iommu->devid & 0xff);
1851 if (!iommu->dev)
1852 return -ENODEV;
1853
1854
1855 iommu->dev->match_driver = false;
1856
1857 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1858 &iommu->cap);
1859
1860 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1861 amd_iommu_iotlb_sup = false;
1862
1863 late_iommu_features_init(iommu);
1864
1865 if (iommu_feature(iommu, FEATURE_GT)) {
1866 int glxval;
1867 u32 max_pasid;
1868 u64 pasmax;
1869
1870 pasmax = iommu->features & FEATURE_PASID_MASK;
1871 pasmax >>= FEATURE_PASID_SHIFT;
1872 max_pasid = (1 << (pasmax + 1)) - 1;
1873
1874 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1875
1876 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
1877
1878 glxval = iommu->features & FEATURE_GLXVAL_MASK;
1879 glxval >>= FEATURE_GLXVAL_SHIFT;
1880
1881 if (amd_iommu_max_glx_val == -1)
1882 amd_iommu_max_glx_val = glxval;
1883 else
1884 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1885 }
1886
1887 if (iommu_feature(iommu, FEATURE_GT) &&
1888 iommu_feature(iommu, FEATURE_PPR)) {
1889 iommu->is_iommu_v2 = true;
1890 amd_iommu_v2_present = true;
1891 }
1892
1893 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1894 return -ENOMEM;
1895
1896 ret = iommu_init_ga(iommu);
1897 if (ret)
1898 return ret;
1899
1900 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1901 amd_iommu_np_cache = true;
1902
1903 init_iommu_perf_ctr(iommu);
1904
1905 if (is_rd890_iommu(iommu->dev)) {
1906 int i, j;
1907
1908 iommu->root_pdev =
1909 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1910 PCI_DEVFN(0, 0));
1911
1912
1913
1914
1915
1916
1917 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1918 &iommu->stored_addr_lo);
1919 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1920 &iommu->stored_addr_hi);
1921
1922
1923 iommu->stored_addr_lo &= ~1;
1924
1925 for (i = 0; i < 6; i++)
1926 for (j = 0; j < 0x12; j++)
1927 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1928
1929 for (i = 0; i < 0x83; i++)
1930 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1931 }
1932
1933 amd_iommu_erratum_746_workaround(iommu);
1934 amd_iommu_ats_write_check_workaround(iommu);
1935
1936 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1937 amd_iommu_groups, "ivhd%d", iommu->index);
1938 iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
1939 iommu_device_register(&iommu->iommu);
1940
1941 return pci_enable_device(iommu->dev);
1942}
1943
1944static void print_iommu_info(void)
1945{
1946 static const char * const feat_str[] = {
1947 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1948 "IA", "GA", "HE", "PC"
1949 };
1950 struct amd_iommu *iommu;
1951
1952 for_each_iommu(iommu) {
1953 struct pci_dev *pdev = iommu->dev;
1954 int i;
1955
1956 pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
1957
1958 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
1959 pci_info(pdev, "Extended features (%#llx):",
1960 iommu->features);
1961 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
1962 if (iommu_feature(iommu, (1ULL << i)))
1963 pr_cont(" %s", feat_str[i]);
1964 }
1965
1966 if (iommu->features & FEATURE_GAM_VAPIC)
1967 pr_cont(" GA_vAPIC");
1968
1969 pr_cont("\n");
1970 }
1971 }
1972 if (irq_remapping_enabled) {
1973 pr_info("Interrupt remapping enabled\n");
1974 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
1975 pr_info("Virtual APIC enabled\n");
1976 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
1977 pr_info("X2APIC enabled\n");
1978 }
1979}
1980
1981static int __init amd_iommu_init_pci(void)
1982{
1983 struct amd_iommu *iommu;
1984 int ret;
1985
1986 for_each_iommu(iommu) {
1987 ret = iommu_init_pci(iommu);
1988 if (ret)
1989 break;
1990
1991
1992 iommu_set_cwwb_range(iommu);
1993 }
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005 ret = amd_iommu_init_api();
2006
2007 init_device_table_dma();
2008
2009 for_each_iommu(iommu)
2010 iommu_flush_all_caches(iommu);
2011
2012 if (!ret)
2013 print_iommu_info();
2014
2015 return ret;
2016}
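
/****************************************************************************
 *
 * The following functions initialize the interrupt sources of the
 * IOMMUs (event log, PPR log and GA log), using either a classic MSI or
 * the IntCapXT registers when the IOMMU runs in x2APIC mode.
 *
 ****************************************************************************/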
2027static int iommu_setup_msi(struct amd_iommu *iommu)
2028{
2029 int r;
2030
2031 r = pci_enable_msi(iommu->dev);
2032 if (r)
2033 return r;
2034
2035 r = request_threaded_irq(iommu->dev->irq,
2036 amd_iommu_int_handler,
2037 amd_iommu_int_thread,
2038 0, "AMD-Vi",
2039 iommu);
2040
2041 if (r) {
2042 pci_disable_msi(iommu->dev);
2043 return r;
2044 }
2045
2046 return 0;
2047}
2048
2049union intcapxt {
2050 u64 capxt;
2051 struct {
2052 u64 reserved_0 : 2,
2053 dest_mode_logical : 1,
2054 reserved_1 : 5,
2055 destid_0_23 : 24,
2056 vector : 8,
2057 reserved_2 : 16,
2058 destid_24_31 : 8;
2059 };
2060} __attribute__ ((packed));
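
/*
 * The empty mask/unmask handlers below are intentional: the IntCapXT
 * routing registers are rewritten as a whole in
 * intcapxt_irqdomain_activate() whenever the affinity changes, so the
 * driver does not mask at the irqchip level.
 */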
2067static void intcapxt_unmask_irq(struct irq_data *data)
2068{
2069}
2070
2071static void intcapxt_mask_irq(struct irq_data *data)
2072{
2073}
2074
2075static struct irq_chip intcapxt_controller;
2076
2077static int intcapxt_irqdomain_activate(struct irq_domain *domain,
2078 struct irq_data *irqd, bool reserve)
2079{
2080 struct amd_iommu *iommu = irqd->chip_data;
2081 struct irq_cfg *cfg = irqd_cfg(irqd);
2082 union intcapxt xt;
2083
2084 xt.capxt = 0ULL;
2085 xt.dest_mode_logical = apic->dest_mode_logical;
2086 xt.vector = cfg->vector;
2087 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
2088 xt.destid_24_31 = cfg->dest_apicid >> 24;
2089
2090
2091
2092
2093
2094 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
2095 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
2096 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
2097 return 0;
2098}
2099
2100static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
2101 struct irq_data *irqd)
2102{
2103 intcapxt_mask_irq(irqd);
2104}
2105
2106
2107static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
2108 unsigned int nr_irqs, void *arg)
2109{
2110 struct irq_alloc_info *info = arg;
2111 int i, ret;
2112
2113 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
2114 return -EINVAL;
2115
2116 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
2117 if (ret < 0)
2118 return ret;
2119
2120 for (i = virq; i < virq + nr_irqs; i++) {
2121 struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
2122
2123 irqd->chip = &intcapxt_controller;
2124 irqd->chip_data = info->data;
2125 __irq_set_handler(i, handle_edge_irq, 0, "edge");
2126 }
2127
2128 return ret;
2129}
2130
2131static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
2132 unsigned int nr_irqs)
2133{
2134 irq_domain_free_irqs_top(domain, virq, nr_irqs);
2135}
2136
2137static int intcapxt_set_affinity(struct irq_data *irqd,
2138 const struct cpumask *mask, bool force)
2139{
2140 struct irq_data *parent = irqd->parent_data;
2141 int ret;
2142
2143 ret = parent->chip->irq_set_affinity(parent, mask, force);
2144 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
2145 return ret;
2146
2147 return intcapxt_irqdomain_activate(irqd->domain, irqd, false);
2148}
2149
2150static struct irq_chip intcapxt_controller = {
2151 .name = "IOMMU-MSI",
2152 .irq_unmask = intcapxt_unmask_irq,
2153 .irq_mask = intcapxt_mask_irq,
2154 .irq_ack = irq_chip_ack_parent,
2155 .irq_retrigger = irq_chip_retrigger_hierarchy,
2156 .irq_set_affinity = intcapxt_set_affinity,
2157 .flags = IRQCHIP_SKIP_SET_WAKE,
2158};
2159
2160static const struct irq_domain_ops intcapxt_domain_ops = {
2161 .alloc = intcapxt_irqdomain_alloc,
2162 .free = intcapxt_irqdomain_free,
2163 .activate = intcapxt_irqdomain_activate,
2164 .deactivate = intcapxt_irqdomain_deactivate,
2165};
2166
2167
2168static struct irq_domain *iommu_irqdomain;
2169
2170static struct irq_domain *iommu_get_irqdomain(void)
2171{
2172 struct fwnode_handle *fn;
2173
2174
2175 if (iommu_irqdomain)
2176 return iommu_irqdomain;
2177
2178 fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
2179 if (!fn)
2180 return NULL;
2181
2182 iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
2183 fn, &intcapxt_domain_ops,
2184 NULL);
2185 if (!iommu_irqdomain)
2186 irq_domain_free_fwnode(fn);
2187
2188 return iommu_irqdomain;
2189}
2190
2191static int iommu_setup_intcapxt(struct amd_iommu *iommu)
2192{
2193 struct irq_domain *domain;
2194 struct irq_alloc_info info;
2195 int irq, ret;
2196
2197 domain = iommu_get_irqdomain();
2198 if (!domain)
2199 return -ENXIO;
2200
2201 init_irq_alloc_info(&info, NULL);
2202 info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
2203 info.data = iommu;
2204
2205 irq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
2206 if (irq < 0) {
2207 irq_domain_remove(domain);
2208 return irq;
2209 }
2210
2211 ret = request_threaded_irq(irq, amd_iommu_int_handler,
2212 amd_iommu_int_thread, 0, "AMD-Vi", iommu);
2213 if (ret) {
2214 irq_domain_free_irqs(irq, 1);
2215 irq_domain_remove(domain);
2216 return ret;
2217 }
2218
2219 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2220 return 0;
2221}
2222
2223static int iommu_init_irq(struct amd_iommu *iommu)
2224{
2225 int ret;
2226
2227 if (iommu->int_enabled)
2228 goto enable_faults;
2229
2230 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2231 ret = iommu_setup_intcapxt(iommu);
2232 else if (iommu->dev->msi_cap)
2233 ret = iommu_setup_msi(iommu);
2234 else
2235 ret = -ENODEV;
2236
2237 if (ret)
2238 return ret;
2239
2240 iommu->int_enabled = true;
2241enable_faults:
2242 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2243
2244 if (iommu->ppr_log != NULL)
2245 iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
2246
2247 iommu_ga_log_enable(iommu);
2248
2249 return 0;
2250}
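
/****************************************************************************
 *
 * The next functions belong to a separate pass over the ACPI table: the
 * memory-definition (IVMD) entries are gathered here and turned into
 * the amd_iommu_unity_map list of unity-mapped ranges.
 *
 ****************************************************************************/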
2260static void __init free_unity_maps(void)
2261{
2262 struct unity_map_entry *entry, *next;
2263
2264 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
2265 list_del(&entry->list);
2266 kfree(entry);
2267 }
2268}
2269
2270
2271static int __init init_unity_map_range(struct ivmd_header *m)
2272{
2273 struct unity_map_entry *e = NULL;
2274 char *s;
2275
2276 e = kzalloc(sizeof(*e), GFP_KERNEL);
2277 if (e == NULL)
2278 return -ENOMEM;
2279
2280 switch (m->type) {
2281 default:
2282 kfree(e);
2283 return 0;
2284 case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
2286 e->devid_start = e->devid_end = m->devid;
2287 break;
2288 case ACPI_IVMD_TYPE_ALL:
2289 s = "IVMD_TYPE_ALL\t\t";
2290 e->devid_start = 0;
2291 e->devid_end = amd_iommu_last_bdf;
2292 break;
2293 case ACPI_IVMD_TYPE_RANGE:
2294 s = "IVMD_TYPE_RANGE\t\t";
2295 e->devid_start = m->devid;
2296 e->devid_end = m->aux;
2297 break;
2298 }
2299 e->address_start = PAGE_ALIGN(m->range_start);
2300 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2301 e->prot = m->flags >> 1;
2302
2303
2304
2305
2306
2307
2308
2309
2310 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2311 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
2312
2313 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
2314 " range_start: %016llx range_end: %016llx flags: %x\n", s,
2315 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2316 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
2317 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2318 e->address_start, e->address_end, m->flags);
2319
2320 list_add_tail(&e->list, &amd_iommu_unity_map);
2321
2322 return 0;
2323}
2324
2325
2326static int __init init_memory_definitions(struct acpi_table_header *table)
2327{
2328 u8 *p = (u8 *)table, *end = (u8 *)table;
2329 struct ivmd_header *m;
2330
2331 end += table->length;
2332 p += IVRS_HEADER_LENGTH;
2333
2334 while (p < end) {
2335 m = (struct ivmd_header *)p;
2336 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2337 init_unity_map_range(m);
2338
2339 p += m->length;
2340 }
2341
2342 return 0;
2343}
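
/*
 * Give every possible devid a default device table entry (valid, with
 * translation enabled) so that devices are not allowed DMA access until
 * they are attached to a protection domain.
 */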
2348static void init_device_table_dma(void)
2349{
2350 u32 devid;
2351
2352 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2353 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2354 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
2355 }
2356}
2357
2358static void __init uninit_device_table_dma(void)
2359{
2360 u32 devid;
2361
2362 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2363 amd_iommu_dev_table[devid].data[0] = 0ULL;
2364 amd_iommu_dev_table[devid].data[1] = 0ULL;
2365 }
2366}
2367
2368static void init_device_table(void)
2369{
2370 u32 devid;
2371
2372 if (!amd_iommu_irq_remap)
2373 return;
2374
2375 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2376 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
2377}
2378
2379static void iommu_init_flags(struct amd_iommu *iommu)
2380{
2381 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2382 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2383 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2384
2385 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2386 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2387 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2388
2389 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2390 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2391 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2392
2393 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2394 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2395 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2396
2397
2398
2399
2400 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2401
2402
2403 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2404}
2405
2406static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2407{
2408 int i, j;
2409 u32 ioc_feature_control;
2410 struct pci_dev *pdev = iommu->root_pdev;
2411
2412
2413 if (!is_rd890_iommu(iommu->dev) || !pdev)
2414 return;
2415
2416
2417
2418
2419
2420
2421
2422 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2423 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2424
2425
2426 if (!(ioc_feature_control & 0x1))
2427 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2428
2429
2430 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2431 iommu->stored_addr_lo);
2432 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2433 iommu->stored_addr_hi);
2434
2435
2436 for (i = 0; i < 6; i++)
2437 for (j = 0; j < 0x12; j++)
2438 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2439
2440
2441 for (i = 0; i < 0x83; i++)
2442 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2443
2444
2445 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2446 iommu->stored_addr_lo | 1);
2447}
2448
2449static void iommu_enable_ga(struct amd_iommu *iommu)
2450{
2451#ifdef CONFIG_IRQ_REMAP
2452 switch (amd_iommu_guest_ir) {
2453 case AMD_IOMMU_GUEST_IR_VAPIC:
2454 iommu_feature_enable(iommu, CONTROL_GAM_EN);
2455 fallthrough;
2456 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2457 iommu_feature_enable(iommu, CONTROL_GA_EN);
2458 iommu->irte_ops = &irte_128_ops;
2459 break;
2460 default:
2461 iommu->irte_ops = &irte_32_ops;
2462 break;
2463 }
2464#endif
2465}
2466
2467static void early_enable_iommu(struct amd_iommu *iommu)
2468{
2469 iommu_disable(iommu);
2470 iommu_init_flags(iommu);
2471 iommu_set_device_table(iommu);
2472 iommu_enable_command_buffer(iommu);
2473 iommu_enable_event_buffer(iommu);
2474 iommu_set_exclusion_range(iommu);
2475 iommu_enable_ga(iommu);
2476 iommu_enable_xt(iommu);
2477 iommu_enable(iommu);
2478 iommu_flush_all_caches(iommu);
2479}
2480
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or, if running in a kdump kernel with pre-enabled IOMMUs, try to copy
 * the old content of the device table entries. If that is not possible,
 * just continue as a normal kernel would.
 */
2489static void early_enable_iommus(void)
2490{
2491 struct amd_iommu *iommu;
2492
2493
2494 if (!copy_device_table()) {
		/*
		 * If we get here, copying the device table from the
		 * previous kernel failed. Print an error message if the
		 * IOMMU was pre-enabled and free the allocated copy.
		 */
2500 if (amd_iommu_pre_enabled)
2501 pr_err("Failed to copy DEV table from previous kernel.\n");
2502 if (old_dev_tbl_cpy != NULL)
2503 free_pages((unsigned long)old_dev_tbl_cpy,
2504 get_order(dev_table_size));
2505
2506 for_each_iommu(iommu) {
2507 clear_translation_pre_enabled(iommu);
2508 early_enable_iommu(iommu);
2509 }
2510 } else {
2511 pr_info("Copied DEV table from previous kernel.\n");
2512 free_pages((unsigned long)amd_iommu_dev_table,
2513 get_order(dev_table_size));
2514 amd_iommu_dev_table = old_dev_tbl_cpy;
2515 for_each_iommu(iommu) {
2516 iommu_disable_command_buffer(iommu);
2517 iommu_disable_event_buffer(iommu);
2518 iommu_enable_command_buffer(iommu);
2519 iommu_enable_event_buffer(iommu);
2520 iommu_enable_ga(iommu);
2521 iommu_enable_xt(iommu);
2522 iommu_set_device_table(iommu);
2523 iommu_flush_all_caches(iommu);
2524 }
2525 }
2526
2527#ifdef CONFIG_IRQ_REMAP
2528 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2529 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2530#endif
2531}
2532
2533static void enable_iommus_v2(void)
2534{
2535 struct amd_iommu *iommu;
2536
2537 for_each_iommu(iommu) {
2538 iommu_enable_ppr_log(iommu);
2539 iommu_enable_gt(iommu);
2540 }
2541}
2542
2543static void enable_iommus(void)
2544{
2545 early_enable_iommus();
2546
2547 enable_iommus_v2();
2548}
2549
2550static void disable_iommus(void)
2551{
2552 struct amd_iommu *iommu;
2553
2554 for_each_iommu(iommu)
2555 iommu_disable(iommu);
2556
2557#ifdef CONFIG_IRQ_REMAP
2558 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2559 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2560#endif
2561}
2562
/*
 * Suspend/Resume support
 */
2568static void amd_iommu_resume(void)
2569{
2570 struct amd_iommu *iommu;
2571
2572 for_each_iommu(iommu)
2573 iommu_apply_resume_quirks(iommu);
2574
2575
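	/* re-load the hardware */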
2576 enable_iommus();
2577
2578 amd_iommu_enable_interrupts();
2579}
2580
2581static int amd_iommu_suspend(void)
2582{
2583
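	/* disable IOMMUs to go out of the way for BIOS */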
2584 disable_iommus();
2585
2586 return 0;
2587}
2588
2589static struct syscore_ops amd_iommu_syscore_ops = {
2590 .suspend = amd_iommu_suspend,
2591 .resume = amd_iommu_resume,
2592};
2593
2594static void __init free_iommu_resources(void)
2595{
2596 kmemleak_free(irq_lookup_table);
2597 free_pages((unsigned long)irq_lookup_table,
2598 get_order(rlookup_table_size));
2599 irq_lookup_table = NULL;
2600
2601 kmem_cache_destroy(amd_iommu_irq_cache);
2602 amd_iommu_irq_cache = NULL;
2603
2604 free_pages((unsigned long)amd_iommu_rlookup_table,
2605 get_order(rlookup_table_size));
2606 amd_iommu_rlookup_table = NULL;
2607
2608 free_pages((unsigned long)amd_iommu_alias_table,
2609 get_order(alias_table_size));
2610 amd_iommu_alias_table = NULL;
2611
2612 free_pages((unsigned long)amd_iommu_dev_table,
2613 get_order(dev_table_size));
2614 amd_iommu_dev_table = NULL;
2615
2616 free_iommu_all();
2617}
2618
2619
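/* SB IOAPIC is always on this device in AMD systems */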
2620#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
2621
2622static bool __init check_ioapic_information(void)
2623{
2624 const char *fw_bug = FW_BUG;
2625 bool ret, has_sb_ioapic;
2626 int idx;
2627
2628 has_sb_ioapic = false;
2629 ret = false;
2630
	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful.
	 */
2636 if (cmdline_maps)
2637 fw_bug = "";
2638
2639 for (idx = 0; idx < nr_ioapics; idx++) {
2640 int devid, id = mpc_ioapic_id(idx);
2641
2642 devid = get_ioapic_devid(id);
2643 if (devid < 0) {
2644 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
2645 fw_bug, id);
2646 ret = false;
2647 } else if (devid == IOAPIC_SB_DEVID) {
2648 has_sb_ioapic = true;
2649 ret = true;
2650 }
2651 }
2652
2653 if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IO-APIC in the system.
		 */
2662 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
2663 }
2664
2665 if (!ret)
2666 pr_err("Disabling interrupt remapping\n");
2667
2668 return ret;
2669}
2670
2671static void __init free_dma_resources(void)
2672{
2673 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2674 get_order(MAX_DOMAIN_ID/8));
2675 amd_iommu_pd_alloc_bitmap = NULL;
2676
2677 free_unity_maps();
2678}
2679
2680static void __init ivinfo_init(void *ivrs)
2681{
2682 amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
2683}
2684
/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * four times:
 *
 *	1 pass) Discover the most comprehensive IVHD type to use.
 *
 *	2 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the sizes of the shared data
 *		structures that need to be allocated are determined.
 *
 *	3 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs.
 *
 *	4 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug events are sent to the driver.
 */
2712static int __init early_amd_iommu_init(void)
2713{
2714 struct acpi_table_header *ivrs_base;
2715 int i, remap_cache_sz, ret;
2716 acpi_status status;
2717
2718 if (!amd_iommu_detected)
2719 return -ENODEV;
2720
2721 status = acpi_get_table("IVRS", 0, &ivrs_base);
2722 if (status == AE_NOT_FOUND)
2723 return -ENODEV;
2724 else if (ACPI_FAILURE(status)) {
2725 const char *err = acpi_format_exception(status);
2726 pr_err("IVRS table error: %s\n", err);
2727 return -EINVAL;
2728 }
2729
	/*
	 * Validate the checksum here so we don't need to do it when
	 * we actually parse the table.
	 */
2734 ret = check_ivrs_checksum(ivrs_base);
2735 if (ret)
2736 goto out;
2737
2738 ivinfo_init(ivrs_base);
2739
2740 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2741 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2742
	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated.
	 */
2748 ret = find_last_devid_acpi(ivrs_base);
2749 if (ret)
2750 goto out;
2751
2752 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2753 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2754 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
2755
2756
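	/* Device table - directly used by all IOMMUs */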
2757 ret = -ENOMEM;
2758 amd_iommu_dev_table = (void *)__get_free_pages(
2759 GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
2760 get_order(dev_table_size));
2761 if (amd_iommu_dev_table == NULL)
2762 goto out;
2763
	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device.
	 */
2768 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2769 get_order(alias_table_size));
2770 if (amd_iommu_alias_table == NULL)
2771 goto out;
2772
2773
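	/* IOMMU rlookup table - find the IOMMU for a specific device */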
2774 amd_iommu_rlookup_table = (void *)__get_free_pages(
2775 GFP_KERNEL | __GFP_ZERO,
2776 get_order(rlookup_table_size));
2777 if (amd_iommu_rlookup_table == NULL)
2778 goto out;
2779
2780 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2781 GFP_KERNEL | __GFP_ZERO,
2782 get_order(MAX_DOMAIN_ID/8));
2783 if (amd_iommu_pd_alloc_bitmap == NULL)
2784 goto out;
2785
	/*
	 * Let all alias entries point to themselves initially.
	 */
2789 for (i = 0; i <= amd_iommu_last_bdf; ++i)
2790 amd_iommu_alias_table[i] = i;
2791
	/*
	 * Never allocate domain 0 because it is used as the
	 * non-allocated and error value placeholder.
	 */
2796 __set_bit(0, amd_iommu_pd_alloc_bitmap);
2797
	/*
	 * Now the data structures are allocated and basically
	 * initialized; start the real ACPI table scan.
	 */
2802 ret = init_iommu_all(ivrs_base);
2803 if (ret)
2804 goto out;
2805
2806
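	/* Disable any previously enabled IOMMUs */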
2807 if (!is_kdump_kernel() || amd_iommu_disabled)
2808 disable_iommus();
2809
2810 if (amd_iommu_irq_remap)
2811 amd_iommu_irq_remap = check_ioapic_information();
2812
2813 if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
2818 ret = -ENOMEM;
2819 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2820 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2821 else
2822 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
2823 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
2824 remap_cache_sz,
2825 DTE_INTTAB_ALIGNMENT,
2826 0, NULL);
2827 if (!amd_iommu_irq_cache)
2828 goto out;
2829
2830 irq_lookup_table = (void *)__get_free_pages(
2831 GFP_KERNEL | __GFP_ZERO,
2832 get_order(rlookup_table_size));
2833 kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2834 1, GFP_KERNEL);
2835 if (!irq_lookup_table)
2836 goto out;
2837 }
2838
2839 ret = init_memory_definitions(ivrs_base);
2840 if (ret)
2841 goto out;
2842
2843
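	/* init the device table */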
2844 init_device_table();
2845
2846out:
2847
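	/* Don't leak any ACPI memory */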
2848 acpi_put_table(ivrs_base);
2849
2850 return ret;
2851}
2852
2853static int amd_iommu_enable_interrupts(void)
2854{
2855 struct amd_iommu *iommu;
2856 int ret = 0;
2857
2858 for_each_iommu(iommu) {
2859 ret = iommu_init_irq(iommu);
2860 if (ret)
2861 goto out;
2862 }
2863
2864out:
2865 return ret;
2866}
2867
2868static bool detect_ivrs(void)
2869{
2870 struct acpi_table_header *ivrs_base;
2871 acpi_status status;
2872 int i;
2873
2874 status = acpi_get_table("IVRS", 0, &ivrs_base);
2875 if (status == AE_NOT_FOUND)
2876 return false;
2877 else if (ACPI_FAILURE(status)) {
2878 const char *err = acpi_format_exception(status);
2879 pr_err("IVRS table error: %s\n", err);
2880 return false;
2881 }
2882
2883 acpi_put_table(ivrs_base);
2884
2885
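	/* Don't use IOMMU if there is Stoney Ridge graphics */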
2886 for (i = 0; i < 32; i++) {
2887 u32 pci_id;
2888
2889 pci_id = read_pci_config(0, i, 0, 0);
2890 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
2891 pr_info("Disable IOMMU on Stoney Ridge\n");
2892 return false;
2893 }
2894 }
2895
2896
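	/* Make sure ACS will be enabled during PCI probe */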
2897 pci_request_acs();
2898
2899 return true;
2900}
2901
/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
2908static int __init state_next(void)
2909{
2910 int ret = 0;
2911
2912 switch (init_state) {
2913 case IOMMU_START_STATE:
2914 if (!detect_ivrs()) {
2915 init_state = IOMMU_NOT_FOUND;
2916 ret = -ENODEV;
2917 } else {
2918 init_state = IOMMU_IVRS_DETECTED;
2919 }
2920 break;
2921 case IOMMU_IVRS_DETECTED:
2922 if (amd_iommu_disabled) {
2923 init_state = IOMMU_CMDLINE_DISABLED;
2924 ret = -EINVAL;
2925 } else {
2926 ret = early_amd_iommu_init();
2927 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
2928 }
2929 break;
2930 case IOMMU_ACPI_FINISHED:
2931 early_enable_iommus();
2932 x86_platform.iommu_shutdown = disable_iommus;
2933 init_state = IOMMU_ENABLED;
2934 break;
2935 case IOMMU_ENABLED:
2936 register_syscore_ops(&amd_iommu_syscore_ops);
2937 ret = amd_iommu_init_pci();
2938 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2939 enable_iommus_v2();
2940 break;
2941 case IOMMU_PCI_INIT:
2942 ret = amd_iommu_enable_interrupts();
2943 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2944 break;
2945 case IOMMU_INTERRUPTS_EN:
2946 ret = amd_iommu_init_dma_ops();
2947 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2948 break;
2949 case IOMMU_DMA_OPS:
2950 init_state = IOMMU_INITIALIZED;
2951 break;
2952 case IOMMU_INITIALIZED:
2953
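		/* Nothing to do */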
2954 break;
2955 case IOMMU_NOT_FOUND:
2956 case IOMMU_INIT_ERROR:
2957 case IOMMU_CMDLINE_DISABLED:
2958
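		/* Error states => do nothing */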
2959 ret = -EINVAL;
2960 break;
2961 default:
2962
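		/* Unknown state */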
2963 BUG();
2964 }
2965
2966 if (ret) {
2967 free_dma_resources();
2968 if (!irq_remapping_enabled) {
2969 disable_iommus();
2970 free_iommu_resources();
2971 } else {
2972 struct amd_iommu *iommu;
2973
2974 uninit_device_table_dma();
2975 for_each_iommu(iommu)
2976 iommu_flush_all_caches(iommu);
2977 }
2978 }
2979 return ret;
2980}
2981
2982static int __init iommu_go_to_state(enum iommu_init_state state)
2983{
2984 int ret = -EINVAL;
2985
2986 while (init_state != state) {
2987 if (init_state == IOMMU_NOT_FOUND ||
2988 init_state == IOMMU_INIT_ERROR ||
2989 init_state == IOMMU_CMDLINE_DISABLED)
2990 break;
2991 ret = state_next();
2992 }
2993
2994 return ret;
2995}
2996
2997#ifdef CONFIG_IRQ_REMAP
2998int __init amd_iommu_prepare(void)
2999{
3000 int ret;
3001
3002 amd_iommu_irq_remap = true;
3003
3004 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
3005 if (ret) {
3006 amd_iommu_irq_remap = false;
3007 return ret;
3008 }
3009
3010 return amd_iommu_irq_remap ? 0 : -ENODEV;
3011}
3012
3013int __init amd_iommu_enable(void)
3014{
3015 int ret;
3016
3017 ret = iommu_go_to_state(IOMMU_ENABLED);
3018 if (ret)
3019 return ret;
3020
3021 irq_remapping_enabled = 1;
3022 return amd_iommu_xt_mode;
3023}
3024
3025void amd_iommu_disable(void)
3026{
3027 amd_iommu_suspend();
3028}
3029
3030int amd_iommu_reenable(int mode)
3031{
3032 amd_iommu_resume();
3033
3034 return 0;
3035}
3036
3037int __init amd_iommu_enable_faulting(void)
3038{
3039
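	/* We enable MSI later when PCI is initialized */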
3040 return 0;
3041}
3042#endif
3043
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
3049static int __init amd_iommu_init(void)
3050{
3051 struct amd_iommu *iommu;
3052 int ret;
3053
3054 ret = iommu_go_to_state(IOMMU_INITIALIZED);
3055#ifdef CONFIG_GART_IOMMU
3056 if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
3061 gart_iommu_init();
3062 }
3063#endif
3064
3065 for_each_iommu(iommu)
3066 amd_iommu_debugfs_setup(iommu);
3067
3068 return ret;
3069}
3070
3071static bool amd_iommu_sme_check(void)
3072{
3073 if (!sme_active() || (boot_cpu_data.x86 != 0x17))
3074 return true;
3075
3076
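	/* For Fam17h, a specific level of microcode support is required */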
3077 if (boot_cpu_data.microcode >= 0x08001205)
3078 return true;
3079
3080 if ((boot_cpu_data.microcode >= 0x08001126) &&
3081 (boot_cpu_data.microcode <= 0x080011ff))
3082 return true;
3083
3084 pr_notice("IOMMU not currently supported when SME is active\n");
3085
3086 return false;
3087}
3088
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs.
 *
 ****************************************************************************/
3096int __init amd_iommu_detect(void)
3097{
3098 int ret;
3099
3100 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
3101 return -ENODEV;
3102
3103 if (!amd_iommu_sme_check())
3104 return -ENODEV;
3105
3106 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
3107 if (ret)
3108 return ret;
3109
3110 amd_iommu_detected = true;
3111 iommu_detected = 1;
3112 x86_init.iommu.iommu_init = amd_iommu_init;
3113
3114 return 1;
3115}
3116
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/
3124static int __init parse_amd_iommu_dump(char *str)
3125{
3126 amd_iommu_dump = true;
3127
3128 return 1;
3129}
3130
3131static int __init parse_amd_iommu_intr(char *str)
3132{
3133 for (; *str; ++str) {
3134 if (strncmp(str, "legacy", 6) == 0) {
3135 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3136 break;
3137 }
3138 if (strncmp(str, "vapic", 5) == 0) {
3139 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
3140 break;
3141 }
3142 }
3143 return 1;
3144}
3145
3146static int __init parse_amd_iommu_options(char *str)
3147{
3148 for (; *str; ++str) {
3149 if (strncmp(str, "fullflush", 9) == 0)
3150 amd_iommu_unmap_flush = true;
3151 if (strncmp(str, "off", 3) == 0)
3152 amd_iommu_disabled = true;
3153 if (strncmp(str, "force_isolation", 15) == 0)
3154 amd_iommu_force_isolation = true;
3155 }
3156
3157 return 1;
3158}
3159
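/*
 * Parses options of the form ivrs_ioapic[<id>]=<bus>:<dev>.<fn>,
 * e.g. ivrs_ioapic[10]=00:14.0, to override the IOAPIC-to-device-id
 * mapping from the IVRS table.
 */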
3160static int __init parse_ivrs_ioapic(char *str)
3161{
3162 unsigned int bus, dev, fn;
3163 int ret, id, i;
3164 u16 devid;
3165
3166 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
3167
3168 if (ret != 4) {
3169 pr_err("Invalid command line: ivrs_ioapic%s\n", str);
3170 return 1;
3171 }
3172
3173 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
3174 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
3175 str);
3176 return 1;
3177 }
3178
3179 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3180
3181 cmdline_maps = true;
3182 i = early_ioapic_map_size++;
3183 early_ioapic_map[i].id = id;
3184 early_ioapic_map[i].devid = devid;
3185 early_ioapic_map[i].cmd_line = true;
3186
3187 return 1;
3188}
3189
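/*
 * Parses options of the form ivrs_hpet[<id>]=<bus>:<dev>.<fn>,
 * e.g. ivrs_hpet[0]=00:14.0, to override the HPET-to-device-id
 * mapping from the IVRS table.
 */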
3190static int __init parse_ivrs_hpet(char *str)
3191{
3192 unsigned int bus, dev, fn;
3193 int ret, id, i;
3194 u16 devid;
3195
3196 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
3197
3198 if (ret != 4) {
3199 pr_err("Invalid command line: ivrs_hpet%s\n", str);
3200 return 1;
3201 }
3202
3203 if (early_hpet_map_size == EARLY_MAP_SIZE) {
3204 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
3205 str);
3206 return 1;
3207 }
3208
3209 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3210
3211 cmdline_maps = true;
3212 i = early_hpet_map_size++;
3213 early_hpet_map[i].id = id;
3214 early_hpet_map[i].devid = devid;
3215 early_hpet_map[i].cmd_line = true;
3216
3217 return 1;
3218}
3219
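/*
 * Parses options of the form ivrs_acpihid[<bus>:<dev>.<fn>]=<hid>:<uid>,
 * e.g. ivrs_acpihid[00:14.5]=AMD0020:0, to map an ACPI HID device to
 * the given PCI device id.
 */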
3220static int __init parse_ivrs_acpihid(char *str)
3221{
3222 u32 bus, dev, fn;
3223 char *hid, *uid, *p;
3224 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
3225 int ret, i;
3226
3227 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
3228 if (ret != 4) {
3229 pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
3230 return 1;
3231 }
3232
3233 p = acpiid;
3234 hid = strsep(&p, ":");
3235 uid = p;
3236
3237 if (!hid || !(*hid) || !uid) {
3238 pr_err("Invalid command line: hid or uid\n");
3239 return 1;
3240 }
3241
3242 i = early_acpihid_map_size++;
3243 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3244 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3245 early_acpihid_map[i].devid =
3246 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3247 early_acpihid_map[i].cmd_line = true;
3248
3249 return 1;
3250}
3251
3252__setup("amd_iommu_dump", parse_amd_iommu_dump);
3253__setup("amd_iommu=", parse_amd_iommu_options);
3254__setup("amd_iommu_intr=", parse_amd_iommu_intr);
3255__setup("ivrs_ioapic", parse_ivrs_ioapic);
3256__setup("ivrs_hpet", parse_ivrs_hpet);
3257__setup("ivrs_acpihid", parse_ivrs_acpihid);
3258
3259IOMMU_INIT_FINISH(amd_iommu_detect,
3260 gart_iommu_hole_init,
3261 NULL,
3262 NULL);
3263
3264bool amd_iommu_v2_supported(void)
3265{
3266 return amd_iommu_v2_present;
3267}
3268EXPORT_SYMBOL(amd_iommu_v2_supported);
3269
3270struct amd_iommu *get_amd_iommu(unsigned int idx)
3271{
3272 unsigned int i = 0;
3273 struct amd_iommu *iommu;
3274
3275 for_each_iommu(iommu)
3276 if (i++ == idx)
3277 return iommu;
3278 return NULL;
3279}
3280EXPORT_SYMBOL(get_amd_iommu);
3281
/****************************************************************************
 *
 * IOMMU Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/
3289u8 amd_iommu_pc_get_max_banks(unsigned int idx)
3290{
3291 struct amd_iommu *iommu = get_amd_iommu(idx);
3292
3293 if (iommu)
3294 return iommu->max_banks;
3295
3296 return 0;
3297}
3298EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
3299
3300bool amd_iommu_pc_supported(void)
3301{
3302 return amd_iommu_pc_present;
3303}
3304EXPORT_SYMBOL(amd_iommu_pc_supported);
3305
3306u8 amd_iommu_pc_get_max_counters(unsigned int idx)
3307{
3308 struct amd_iommu *iommu = get_amd_iommu(idx);
3309
3310 if (iommu)
3311 return iommu->max_counters;
3312
3313 return 0;
3314}
3315EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
3316
3317static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3318 u8 fxn, u64 *value, bool is_write)
3319{
3320 u32 offset;
3321 u32 max_offset_lim;
3322
3323
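	/* Make sure the IOMMU PC resource is available */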
3324 if (!amd_iommu_pc_present)
3325 return -ENODEV;
3326
3327
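	/* Check for valid iommu and pc register indexing */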
3328 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3329 return -ENODEV;
3330
3331 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
3332
3333
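	/* Limit the offset to the hw-defined MMIO region aperture */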
3334 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3335 (iommu->max_counters << 8) | 0x28);
3336 if ((offset < MMIO_CNTR_REG_OFFSET) ||
3337 (offset > max_offset_lim))
3338 return -EINVAL;
3339
3340 if (is_write) {
3341 u64 val = *value & GENMASK_ULL(47, 0);
3342
3343 writel((u32)val, iommu->mmio_base + offset);
3344 writel((val >> 32), iommu->mmio_base + offset + 4);
3345 } else {
3346 *value = readl(iommu->mmio_base + offset + 4);
3347 *value <<= 32;
3348 *value |= readl(iommu->mmio_base + offset);
3349 *value &= GENMASK_ULL(47, 0);
3350 }
3351
3352 return 0;
3353}
3354
3355int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3356{
3357 if (!iommu)
3358 return -EINVAL;
3359
3360 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3361}
3362EXPORT_SYMBOL(amd_iommu_pc_get_reg);
3363
3364int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3365{
3366 if (!iommu)
3367 return -EINVAL;
3368
3369 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3370}
3371EXPORT_SYMBOL(amd_iommu_pc_set_reg);
3372