// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */
#define pr_fmt(fmt)	"AMD-Vi: "	fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH		48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL		0x20
#define ACPI_IVMD_TYPE			0x21
#define ACPI_IVMD_TYPE_RANGE		0x22

#define IVHD_DEV_ALL			0x01
#define IVHD_DEV_SELECT			0x02
#define IVHD_DEV_SELECT_RANGE_START	0x03
#define IVHD_DEV_RANGE_END		0x04
#define IVHD_DEV_ALIAS			0x42
#define IVHD_DEV_ALIAS_RANGE		0x43
#define IVHD_DEV_EXT_SELECT		0x46
#define IVHD_DEV_EXT_SELECT_RANGE	0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT			0
#define UID_IS_INTEGER			1
#define UID_IS_CHARACTER		2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK	0x01
#define IVHD_FLAG_PASSPW_EN_MASK	0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK	0x04
#define IVHD_FLAG_ISOC_EN_MASK		0x08

#define IVMD_FLAG_EXCL_RANGE		0x08
#define IVMD_FLAG_IW			0x04
#define IVMD_FLAG_IR			0x02
#define IVMD_FLAG_UNITY_MAP		0x01

#define ACPI_DEVFLAG_INITPASS		0x01
#define ACPI_DEVFLAG_EXTINT		0x02
#define ACPI_DEVFLAG_NMI		0x04
#define ACPI_DEVFLAG_SYSMGT1		0x10
#define ACPI_DEVFLAG_SYSMGT2		0x20
#define ACPI_DEVFLAG_LINT0		0x40
#define ACPI_DEVFLAG_LINT1		0x80
#define ACPI_DEVFLAG_ATSDIS		0x10000000

#define LOOP_TIMEOUT	100000
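
/*
 * Note: LOOP_TIMEOUT is an iteration count used when polling MMIO status
 * registers (see iommu_ga_log_enable() below), not a duration in any fixed
 * time unit.
 */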

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed
 * by one or more ivhd_entrys.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg;	/* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions of memory we have to keep direct mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool amd_iommu_disabled __initdata;
static bool amd_iommu_force_enable __initdata;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */

bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * pointers to the page table root for each device.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * Pointer to a device table to which the content of the old device table
 * will be copied. It is only used in a kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

static u32 amd_iommu_ivinfo __initdata;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
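
/*
 * Worked example for tbl_size() (illustrative values, assuming 4 KiB pages
 * and the 32-byte struct dev_table_entry): with amd_iommu_last_bdf == 0xffff,
 * (0xffff + 1) * 32 = 2 MiB, get_order(2 MiB) == 9, so the function returns
 * 1UL << (12 + 9) = 2 MiB. The result is always a power-of-two number of
 * pages, which matches the size encoding programmed into the hardware in
 * iommu_set_device_table() below.
 */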

int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

/*
 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
 * Default to IVHD EFR since it is available sooner
 * (i.e. before PCI init).
 */
static void __init early_iommu_features_init(struct amd_iommu *iommu,
					     struct ivhd_header *h)
{
	if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
		iommu->features = h->efr_reg;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
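
/*
 * The four helpers above share the same index/data access pattern: the
 * register index is written to a PCI config index port (0xf8 for L1,
 * 0xf0 for L2) and the payload moves through the corresponding data port
 * (0xfc resp. 0xf4). Writes additionally set a write-enable bit in the
 * index word (bit 31 for L1, bit 8 for L2); iommu_write_l1() clears it
 * again afterwards.
 */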

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses
 * inside the exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!iommu_feature(iommu, FEATURE_SNP))
		return;

	/* Note:
	 * Re-purpose Exclusion base/limit registers for Completion wait
	 * write-back base/limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/* Note:
	 * Default to 4 Kbytes, which can be specified by setting base
	 * equal to limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
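
/*
 * These helpers do a plain read-modify-write of the 64-bit control
 * register; note that there is no locking at this level. For example,
 * enabling the IOMMU (see iommu_enable() below) reduces to:
 *
 *	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 */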

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one. Note: despite its name, @end is the size of the region
 * to reserve and map.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
		       address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID devices, entry length is 22 + uid_len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}
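
/*
 * Example of the length encoding above: for entry types below 0x80 the two
 * top bits of the type byte select the entry size, 0x04 << (type >> 6),
 * i.e. 4, 8, 16 or 32 bytes. IVHD_DEV_SELECT (0x02) therefore decodes to a
 * 4-byte entry, while IVHD_DEV_ALIAS (0x42) decodes to an 8-byte entry.
 */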

/*
 * Iterate over the device entries of one IVHD block and update
 * amd_iommu_last_bdf with the highest device id found.
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}
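
/*
 * ACPI checksum convention: all bytes of the table, including the checksum
 * byte itself, must sum to 0 modulo 256. Illustration with a hypothetical
 * 4-byte table { 0x10, 0x20, 0x30, 0xa0 }: the sum is 0x100, which
 * truncates to 0 in a u8, so such a table would be accepted.
 */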

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this pass we initialize the hardware and allocate the
 * data structures we need.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
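
/*
 * The command buffer is a ring: software writes commands at the tail and
 * the IOMMU fetches them from the head. Zeroing both MMIO pointers and the
 * cached cmd_buf_head/cmd_buf_tail while CMDBUF_EN is cleared makes sure
 * hardware and driver agree on an empty ring before fetching is re-enabled.
 */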

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	if (buf &&
	    iommu_feature(iommu, FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      EVT_BUFFER_SIZE);

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log PPR faults to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      PPR_LOG_SIZE);

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
	int ret = 0;

#ifdef CONFIG_IRQ_REMAP
	/*
	 * Note: We have already checked GASup from IVRS table.
	 *       Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

	return ret;
}

static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);

	return iommu->cmd_sem ? 0 : -ENOMEM;
}

static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
	if (iommu->cmd_sem)
		free_page((unsigned long)iommu->cmd_sem);
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
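
/*
 * The device table entry (DTE) is 256 bits wide, stored as four u64 words
 * in data[]. A bit index therefore splits into word = bit >> 6 and
 * bit-in-word = bit & 0x3f. Example: bit 98 lands in data[1]
 * (98 >> 6 == 1) at position 34 (98 & 0x3f == 34).
 */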

static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
				iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
				iommu->index);
			return false;
		}
	}

	/*
	 * This is a kdump kernel, otherwise this wouldn't be pre-enabled.
	 * If SME is active we need to clear the memory encryption mask from
	 * the old device table physical address to get the true physical
	 * address in the kdump kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
						   get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_INTTABLEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}
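
/*
 * When a command-line provided mapping for the same IOAPIC/HPET id already
 * exists (entry->cmd_line is set), the IVRS-provided device id is discarded
 * and *devid is rewritten to the override value, so callers must use the
 * returned *devid rather than the one parsed from the table. The overrides
 * themselves come from parameters such as ivrs_ioapic / ivrs_hpet on the
 * kernel command line.
 */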

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line	= cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);
	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	amd_iommu_apply_ivrs_quirks();

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;

	end += h->length;


	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN];
			u8 uid[ACPIHID_UID_LEN];
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			uid[0] = '\0';
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, &e->uid, e->uidl);
				uid[e->uidl] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			flags = e->flags;

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_acpi_hid_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_acpi_hid_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_cwwb_sem(iommu);
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}

/*
 * This function glues the initialization for one IOMMU together,
 * allocates the command buffer and programs the hardware. It does NOT
 * enable the IOMMU - this is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);
	iommu->cmd_sem_val = 0;

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * GAM also requires GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling them.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * GAM also requires GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling them.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}

		if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

		early_iommu_features_init(iommu, h);

		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_cwwb_sem(iommu))
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	if (amd_iommu_irq_remap) {
		ret = amd_iommu_create_irq_domain(iommu);
		if (ret)
			return ret;
	}

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * Searches through all IVHD blocks and returns the highest supported
 * IVHD type found for the first IOMMU's device id.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
				   (base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}
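
/*
 * Example: firmware may emit both a type 0x10 (legacy) and a type 0x11 or
 * 0x40 IVHD block describing the same IOMMU. The loop above keeps the last
 * block for the same device id whose type is still within
 * ACPI_IVHD_TYPE_MAX_SUPPORTED, so the driver later parses only that
 * representation (amd_iommu_target_ivhd_type) and skips the others.
 */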

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	u64 val;
	struct pci_dev *pdev = iommu->dev;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	pci_info(pdev, "IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};

/*
 * Note: IVHD 0x11 and 0x40 also contain an exact copy of
 * the IOMMU Extended Feature Register [MMIO Offset 0030h].
 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
 */
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{
	u64 features;

	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
		return;

	/* read extended feature bits */
	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);

	if (!iommu->features) {
		iommu->features = features;
		return;
	}

	/*
	 * Sanity check and warn if EFR values from
	 * IVHD and MMIO conflict.
	 */
	if (features != iommu->features)
		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
			features, iommu->features);
}

static int __init iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	int ret;

	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	late_iommu_features_init(iommu);

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	ret = iommu_init_ga(iommu);
	if (ret)
		return ret;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev =
			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reused on resume.
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		struct pci_dev *pdev = iommu->dev;
		int i;

		pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("Extended features (%#llx):", iommu->features);

			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}

			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled) {
		pr_info("Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("Virtual APIC enabled\n");
		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
			pr_info("X2APIC enabled\n");
	}
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;

		/* Need to setup range after PCI init */
		iommu_set_cwwb_range(iommu);
	}

	/*
	 * Order is important here to make sure all unity map requirements are
	 * fulfilled. The unity mappings are created and written to the device
	 * table during the amd_iommu_init_api() call.
	 *
	 * After that we call init_device_table_dma() to make sure any
	 * uninitialized DTE will block DMA, and in the end we flush the caches
	 * of all IOMMUs to make sure the changes to the device table are
	 * active.
	 */
	ret = amd_iommu_init_api();

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	if (!ret)
		print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BAR but we can call pci_enable_msi(x) for each IOMMU
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	return 0;
}

union intcapxt {
	u64	capxt;
	struct {
		u64	reserved_0		:  2,
			dest_mode_logical	:  1,
			reserved_1		:  5,
			destid_0_23		: 24,
			vector			:  8,
			reserved_2		: 16,
			destid_24_31		:  8;
	};
} __attribute__ ((packed));
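
/*
 * Layout of the 64-bit IntCapXT route register described by the union
 * above. Example (illustrative values): vector 0x20 routed to APIC id
 * 0x12345678 in physical mode would set vector = 0x20,
 * destid_0_23 = 0x345678 and destid_24_31 = 0x12; the split destination
 * id fields are what allow a full 32-bit x2APIC id to be expressed.
 */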

/*
 * The unmask and mask callbacks below are intentionally empty stubs;
 * the interrupt routing is programmed in intcapxt_irqdomain_activate().
 */
static void intcapxt_unmask_irq(struct irq_data *data)
{
}

static void intcapxt_mask_irq(struct irq_data *data)
{
}

static struct irq_chip intcapxt_controller;

static int intcapxt_irqdomain_activate(struct irq_domain *domain,
				       struct irq_data *irqd, bool reserve)
{
	struct amd_iommu *iommu = irqd->chip_data;
	struct irq_cfg *cfg = irqd_cfg(irqd);
	union intcapxt xt;

	xt.capxt = 0ULL;
	xt.dest_mode_logical = apic->dest_mode_logical;
	xt.vector = cfg->vector;
	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
	xt.destid_24_31 = cfg->dest_apicid >> 24;

	/*
	 * Current IOMMU implementation uses the same IRQ for all
	 * 3 IOMMU interrupts.
	 */
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
	return 0;
}

static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
					  struct irq_data *irqd)
{
	intcapxt_mask_irq(irqd);
}

static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	int i, ret;

	if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	for (i = virq; i < virq + nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, i);

		irqd->chip = &intcapxt_controller;
		irqd->chip_data = info->data;
		__irq_set_handler(i, handle_edge_irq, 0, "edge");
	}

	return ret;
}

static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs)
{
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static int intcapxt_set_affinity(struct irq_data *irqd,
				 const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irqd->parent_data;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	return intcapxt_irqdomain_activate(irqd->domain, irqd, false);
}

static struct irq_chip intcapxt_controller = {
	.name			= "IOMMU-MSI",
	.irq_unmask		= intcapxt_unmask_irq,
	.irq_mask		= intcapxt_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_affinity	= intcapxt_set_affinity,
	.flags			= IRQCHIP_SKIP_SET_WAKE,
};

static const struct irq_domain_ops intcapxt_domain_ops = {
	.alloc			= intcapxt_irqdomain_alloc,
	.free			= intcapxt_irqdomain_free,
	.activate		= intcapxt_irqdomain_activate,
	.deactivate		= intcapxt_irqdomain_deactivate,
};


static struct irq_domain *iommu_irqdomain;

static struct irq_domain *iommu_get_irqdomain(void)
{
	struct fwnode_handle *fn;

	/* No need for locking here (yet) as the init is single-threaded */
	if (iommu_irqdomain)
		return iommu_irqdomain;

	fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
	if (!fn)
		return NULL;

	iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
						      fn, &intcapxt_domain_ops,
						      NULL);
	if (!iommu_irqdomain)
		irq_domain_free_fwnode(fn);

	return iommu_irqdomain;
}

static int iommu_setup_intcapxt(struct amd_iommu *iommu)
{
	struct irq_domain *domain;
	struct irq_alloc_info info;
	int irq, ret;

	domain = iommu_get_irqdomain();
	if (!domain)
		return -ENXIO;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
	info.data = iommu;

	irq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
	if (irq < 0) {
		irq_domain_remove(domain);
		return irq;
	}

	ret = request_threaded_irq(irq, amd_iommu_int_handler,
				   amd_iommu_int_thread, 0, "AMD-Vi", iommu);
	if (ret) {
		irq_domain_free_irqs(irq, 1);
		irq_domain_remove(domain);
		return ret;
	}

	iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
	return 0;
}

static int iommu_init_irq(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		ret = iommu_setup_intcapxt(iommu);
	else if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

	iommu->int_enabled = true;
enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);

	iommu_ga_log_enable(iommu);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	/*
	 * Treat per-device exclusion ranges as r/w unity-mapped regions
	 * because some buggy BIOSes might lead to the overwritten exclusion
	 * range (exclusion_start and exclusion_length members). This
	 * happens when there are multiple exclusion ranges (IVMD entries)
	 * defined in ACPI table.
	 */
	if (m->flags & IVMD_FLAG_EXCL_RANGE)
		e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}
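
/*
 * Worked example (illustrative values): an IVMD of type 0x21 (ACPI_IVMD_TYPE)
 * with devid 0x0010, range_start 0x100000 and range_length 0x2000 yields a
 * unity map entry covering [0x100000, 0x102000) for device 00:02.0 only,
 * with e->prot taken from flags >> 1, i.e. IVMD_FLAG_IR/IVMD_FLAG_IW become
 * the read and write permission bits of the mapping.
 */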

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
		iommu_feature_enable(iommu, CONTROL_GAM_EN);
		fallthrough;
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}

static void early_enable_iommu(struct amd_iommu *iommu)
{
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable_xt(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, it tries
 * to copy the old content of the device table entries. If that is not
 * the case or the copy failed, it just continues as a normal kernel would.
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;


	if (!copy_device_table()) {
		/*
		 * If we come here because of a failure in copying the device
		 * table from the old kernel with all IOMMUs enabled, print an
		 * error message and try to free the allocated old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");
		if (old_dev_tbl_cpy != NULL)
			free_pages((unsigned long)old_dev_tbl_cpy,
				   get_order(dev_table_size));

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);
		}
	} else {
		pr_info("Copied DEV table from previous kernel.\n");
		free_pages((unsigned long)amd_iommu_dev_table,
			   get_order(dev_table_size));
		amd_iommu_dev_table = old_dev_tbl_cpy;
		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_enable_xt(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);
		}
	}

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}

/*
 * Suspend/Resume support
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to stop device DMA */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};
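
/*
 * Illustrative note (not from the original source): these syscore ops
 * are registered from the init state machine once the IOMMUs have been
 * enabled. On suspend, translation is turned off so the hardware is
 * quiesced before devices lose power; on resume, the RD890 quirks are
 * re-applied first and only then is the full enable path re-run,
 * because a buggy BIOS may not have restored the IOMMU configuration.
 */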

static void __init free_iommu_resources(void)
{
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

	free_iommu_all();
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
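
/*
 * Illustrative note (not from the original source): the device id packs
 * bus, slot and function as bus << 8 | PCI_DEVFN(slot, fn), where
 * PCI_DEVFN(slot, fn) is slot << 3 | fn. For the southbridge IOAPIC at
 * 00:14.0 this works out to (0x00 << 8) | (0x14 << 3) | 0 = 0x00a0.
 */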

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret           = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
			       fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret           = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("Disabling interrupt remapping\n");

	return ret;
}

static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	amd_iommu_pd_alloc_bitmap = NULL;

	free_unity_maps();
}

/* Fetch the IVINFO field from the IVRS table header */
static void __init ivinfo_init(void *ivrs)
{
	amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the
 * interrupt remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * four times:
 *
 *	1 pass) Discover the most comprehensive IVHD type to use.
 *
 *	2 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures
 *		that need to be allocated is determined.
 *
 *	3 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs.
 *
 *	4 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		the last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	int i, remap_cache_sz, ret;
	acpi_status status;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate the checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

	ivinfo_init(ivrs_base);

	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated.
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to the Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
						  get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it's used as the non-allocated
	 * and error value placeholder
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							DTE_INTTAB_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);

	return ret;
}
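
/*
 * Illustrative sizing example (not from the original source), assuming
 * the architecturally maximal last_bdf of 0xffff and the driver-header
 * values DEV_TABLE_ENTRY_SIZE = 32 and MAX_IRQS_PER_TABLE = 256: the
 * device table is 64k entries * 32 bytes = 2 MiB, the alias table 64k
 * u16 entries = 128 KiB, and the rlookup table 64k pointers = 512 KiB
 * on 64-bit. The IRTE cache objects are 256 * 4 bytes for the legacy
 * 32-bit format and 256 * 16 bytes for the 128-bit GA format.
 */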

static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_irq(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static bool __init detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;
	int i;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	if (amd_iommu_force_enable)
		goto out;

	/* Don't use IOMMU if there is Stoney Ridge graphics */
	for (i = 0; i < 32; i++) {
		u32 pci_id;

		pci_id = read_pci_config(0, i, 0, 0);
		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
			pr_info("Disable IOMMU on Stoney Ridge\n");
			return false;
		}
	}

out:
	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}
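
/*
 * Illustrative note (not from the original source): read_pci_config()
 * at register 0 returns the 32-bit vendor/device word, with the vendor
 * id in the low 16 bits and the device id in the high 16 bits. The
 * check above therefore matches vendor 0x1002 (ATI/AMD graphics) with
 * device 0x98e4, the Stoney Ridge GPU, on any slot of bus 0.
 */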

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/

static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state = IOMMU_NOT_FOUND;
			ret = -ENODEV;
		} else {
			init_state = IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		if (amd_iommu_disabled) {
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		} else {
			ret = early_amd_iommu_init();
			init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}
	return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}
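
/*
 * Illustrative note (not from the original source): on a successful
 * boot the state machine advances linearly:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED
 *     -> IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN
 *     -> IOMMU_INITIALIZED
 *
 * Any failure parks it in one of the terminal error states, which
 * iommu_go_to_state() refuses to step out of.
 */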

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret) {
		amd_iommu_irq_remap = false;
		return ret;
	}

	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;
	return amd_iommu_xt_mode;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	struct amd_iommu *iommu;
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
		gart_iommu_init();
	}
#endif

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);

	return ret;
}

static bool amd_iommu_sme_check(void)
{
	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("IOMMU not currently supported when SME is active\n");

	return false;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs.
 *
 ****************************************************************************/

int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "force_enable", 12) == 0)
			amd_iommu_force_enable = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}

static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_ioapic_map_size++;
	early_ioapic_map[i].id = id;
	early_ioapic_map[i].devid = devid;
	early_ioapic_map[i].cmd_line = true;

	return 1;
}
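
/*
 * Illustrative note (not from the original source): the devid encoding
 * above is the same bus << 8 | dev << 3 | fn packing used throughout
 * this file. For example, ivrs_ioapic[4]=00:14.0 maps IOAPIC id 4 to
 * devid (0x00 << 8) | (0x14 << 3) | 0x0 = 0x00a0.
 */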

static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_hpet_map_size++;
	early_hpet_map[i].id = id;
	early_hpet_map[i].devid = devid;
	early_hpet_map[i].cmd_line = true;

	return 1;
}

static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("Invalid command line: hid or uid\n");
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line = true;

	return 1;
}

__setup("amd_iommu_dump",	parse_amd_iommu_dump);
__setup("amd_iommu=",		parse_amd_iommu_options);
__setup("amd_iommu_intr=",	parse_amd_iommu_intr);
__setup("ivrs_ioapic",		parse_ivrs_ioapic);
__setup("ivrs_hpet",		parse_ivrs_hpet);
__setup("ivrs_acpihid",		parse_ivrs_acpihid);
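
/*
 * Illustrative command-line usage (assumed from the parsers above, not
 * part of the original source):
 *
 *	amd_iommu=fullflush
 *	amd_iommu_intr=vapic
 *	ivrs_ioapic[4]=00:14.0
 *	ivrs_hpet[0]=00:14.0
 *	ivrs_acpihid[00:14.5]=AMD0020:0
 *
 * Note that ivrs_ioapic, ivrs_hpet and ivrs_acpihid take no '=' after
 * the option name; the bracketed id is part of the argument string
 * handed to the parser.
 */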

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);

struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}

/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/

u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);

u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);

	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}
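
/*
 * Illustrative note (not from the original source): the register offset
 * packs bank, counter and function as (0x40 | bank) << 12 | cntr << 8 |
 * fxn, so bank 0 / counter 0 / function 0 lands at 0x40000, the start
 * of the counter-bank MMIO region. Counters are 48 bits wide, which is
 * why both the read and the write paths mask with GENMASK_ULL(47, 0),
 * and each 64-bit access is split into two 32-bit MMIO accesses.
 */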

int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}