/*
 * AMD IOMMU (AMD-Vi) driver: device table management, command queue
 * handling, DMA/IOMMU-API domain operations and event/PPR/GA log
 * processing.
 */
8#define pr_fmt(fmt) "AMD-Vi: " fmt
9#define dev_fmt(fmt) pr_fmt(fmt)
10
11#include <linux/ratelimit.h>
12#include <linux/pci.h>
13#include <linux/acpi.h>
14#include <linux/amba/bus.h>
15#include <linux/platform_device.h>
16#include <linux/pci-ats.h>
17#include <linux/bitmap.h>
18#include <linux/slab.h>
19#include <linux/debugfs.h>
20#include <linux/scatterlist.h>
21#include <linux/dma-map-ops.h>
22#include <linux/dma-direct.h>
23#include <linux/dma-iommu.h>
24#include <linux/iommu-helper.h>
25#include <linux/delay.h>
26#include <linux/amd-iommu.h>
27#include <linux/notifier.h>
28#include <linux/export.h>
29#include <linux/irq.h>
30#include <linux/msi.h>
31#include <linux/irqdomain.h>
32#include <linux/percpu.h>
33#include <linux/io-pgtable.h>
34#include <linux/cc_platform.h>
35#include <asm/irq_remapping.h>
36#include <asm/io_apic.h>
37#include <asm/apic.h>
38#include <asm/hw_irq.h>
39#include <asm/proto.h>
40#include <asm/iommu.h>
41#include <asm/gart.h>
42#include <asm/dma.h>
43
44#include "amd_iommu.h"
45#include "../irq_remapping.h"
46
47#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
48
49#define LOOP_TIMEOUT 100000
50
51
52#define IOVA_START_PFN (1)
53#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
54
55
56#define MSI_RANGE_START (0xfee00000)
57#define MSI_RANGE_END (0xfeefffff)
58#define HT_RANGE_START (0xfd00000000ULL)
59#define HT_RANGE_END (0xffffffffffULL)
60
61#define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL
62
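/* Protects amd_iommu_pd_alloc_bitmap, used for protection-domain ID allocation */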
63static DEFINE_SPINLOCK(pd_bitmap_lock);
64
/* Lock-less list of all iommu_dev_data structures allocated by this driver */
66static LLIST_HEAD(dev_data_list);
67
68LIST_HEAD(ioapic_map);
69LIST_HEAD(hpet_map);
70LIST_HEAD(acpihid_map);
71
/*
 * IOMMU operations for this driver; the structure itself is defined at the
 * bottom of this file.
 */
76const struct iommu_ops amd_iommu_ops;
77
78static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
79int amd_iommu_max_glx_val = -1;

/*
 * General struct to manage commands sent to an IOMMU
 */
84struct iommu_cmd {
85 u32 data[4];
86};
87
88struct kmem_cache *amd_iommu_irq_cache;
89
90static void detach_device(struct device *dev);

/****************************************************************
 *
 * Helper functions
 *
 ****************************************************************/
98static inline u16 get_pci_device_id(struct device *dev)
99{
100 struct pci_dev *pdev = to_pci_dev(dev);
101
102 return pci_dev_id(pdev);
103}
104
105static inline int get_acpihid_device_id(struct device *dev,
106 struct acpihid_map_entry **entry)
107{
108 struct acpi_device *adev = ACPI_COMPANION(dev);
109 struct acpihid_map_entry *p;
110
111 if (!adev)
112 return -ENODEV;
113
114 list_for_each_entry(p, &acpihid_map, list) {
115 if (acpi_dev_hid_uid_match(adev, p->hid,
116 p->uid[0] ? p->uid : NULL)) {
117 if (entry)
118 *entry = p;
119 return p->devid;
120 }
121 }
122 return -EINVAL;
123}
124
125static inline int get_device_id(struct device *dev)
126{
127 int devid;
128
129 if (dev_is_pci(dev))
130 devid = get_pci_device_id(dev);
131 else
132 devid = get_acpihid_device_id(dev, NULL);
133
134 return devid;
135}
136
137static struct protection_domain *to_pdomain(struct iommu_domain *dom)
138{
139 return container_of(dom, struct protection_domain, domain);
140}
141
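/*
 * Allocate a new iommu_dev_data structure for the given device ID and add it
 * to the global dev_data_list.
 */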
142static struct iommu_dev_data *alloc_dev_data(u16 devid)
143{
144 struct iommu_dev_data *dev_data;
145
146 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
147 if (!dev_data)
148 return NULL;
149
150 spin_lock_init(&dev_data->lock);
151 dev_data->devid = devid;
152 ratelimit_default_init(&dev_data->rs);
153
154 llist_add(&dev_data->dev_data_list, &dev_data_list);
155 return dev_data;
156}
157
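/* Look up an already allocated iommu_dev_data structure by device ID */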
158static struct iommu_dev_data *search_dev_data(u16 devid)
159{
160 struct iommu_dev_data *dev_data;
161 struct llist_node *node;
162
163 if (llist_empty(&dev_data_list))
164 return NULL;
165
166 node = dev_data_list.first;
167 llist_for_each_entry(dev_data, node, dev_data_list) {
168 if (dev_data->devid == devid)
169 return dev_data;
170 }
171
172 return NULL;
173}
174
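/* Copy the device table entry and rlookup entry of a device to one of its aliases */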
175static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
176{
177 u16 devid = pci_dev_id(pdev);
178
179 if (devid == alias)
180 return 0;
181
182 amd_iommu_rlookup_table[alias] =
183 amd_iommu_rlookup_table[devid];
184 memcpy(amd_iommu_dev_table[alias].data,
185 amd_iommu_dev_table[devid].data,
186 sizeof(amd_iommu_dev_table[alias].data));
187
188 return 0;
189}
190
191static void clone_aliases(struct pci_dev *pdev)
192{
193 if (!pdev)
194 return;
195
	/*
	 * The IVRS alias stored in the alias table may not be part of the
	 * PCI DMA aliases if its bus differs from the original device, so
	 * clone to it explicitly.
	 */
201 clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
202
203 pci_for_each_dma_alias(pdev, clone_alias, NULL);
204}
205
206static struct pci_dev *setup_aliases(struct device *dev)
207{
208 struct pci_dev *pdev = to_pci_dev(dev);
209 u16 ivrs_alias;
210
	/* For ACPI HID devices, there are no aliases */
212 if (!dev_is_pci(dev))
213 return NULL;
214
	/*
	 * Add the IVRS alias to the PCI DMA aliases if it is on the same bus.
	 * The IVRS table may know about a quirk that we don't.
	 */
219 ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
220 if (ivrs_alias != pci_dev_id(pdev) &&
221 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
222 pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
223
224 clone_aliases(pdev);
225
226 return pdev;
227}
228
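/* Find the iommu_dev_data for a device ID, allocating a new one if necessary */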
229static struct iommu_dev_data *find_dev_data(u16 devid)
230{
231 struct iommu_dev_data *dev_data;
232 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
233
234 dev_data = search_dev_data(devid);
235
236 if (dev_data == NULL) {
237 dev_data = alloc_dev_data(devid);
238 if (!dev_data)
239 return NULL;
240
241 if (translation_pre_enabled(iommu))
242 dev_data->defer_attach = true;
243 }
244
245 return dev_data;
246}
247
/*
 * Find or create the IOMMU group for an acpihid device.
 */
251static struct iommu_group *acpihid_device_group(struct device *dev)
252{
253 struct acpihid_map_entry *p, *entry = NULL;
254 int devid;
255
256 devid = get_acpihid_device_id(dev, &entry);
257 if (devid < 0)
258 return ERR_PTR(devid);
259
260 list_for_each_entry(p, &acpihid_map, list) {
261 if ((devid == p->devid) && p->group)
262 entry->group = p->group;
263 }
264
265 if (!entry->group)
266 entry->group = generic_device_group(dev);
267 else
268 iommu_group_ref_get(entry->group);
269
270 return entry->group;
271}
272
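/* A PCI device is IOMMUv2 capable if it supports ATS, PRI and PASID */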
273static bool pci_iommuv2_capable(struct pci_dev *pdev)
274{
275 static const int caps[] = {
276 PCI_EXT_CAP_ID_PRI,
277 PCI_EXT_CAP_ID_PASID,
278 };
279 int i, pos;
280
281 if (!pci_ats_supported(pdev))
282 return false;
283
284 for (i = 0; i < 2; ++i) {
285 pos = pci_find_ext_capability(pdev, caps[i]);
286 if (pos == 0)
287 return false;
288 }
289
290 return true;
291}
292
/*
 * Check whether the device is known to and handled by this IOMMU driver
 * before any of its data structures are dereferenced.
 */
297static bool check_device(struct device *dev)
298{
299 int devid;
300
301 if (!dev)
302 return false;
303
304 devid = get_device_id(dev);
305 if (devid < 0)
306 return false;
307
308
309 if (devid > amd_iommu_last_bdf)
310 return false;
311
312 if (amd_iommu_rlookup_table[devid] == NULL)
313 return false;
314
315 return true;
316}
317
318static int iommu_init_device(struct device *dev)
319{
320 struct iommu_dev_data *dev_data;
321 int devid;
322
323 if (dev_iommu_priv_get(dev))
324 return 0;
325
326 devid = get_device_id(dev);
327 if (devid < 0)
328 return devid;
329
330 dev_data = find_dev_data(devid);
331 if (!dev_data)
332 return -ENOMEM;
333
334 dev_data->pdev = setup_aliases(dev);
335
	/*
	 * By default we use passthrough mode for IOMMUv2 capable devices.
	 * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
	 * invalid addresses), we ignore the capability for the device so
	 * it'll be forced into translation mode.
	 */
342 if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
343 dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
344 struct amd_iommu *iommu;
345
346 iommu = amd_iommu_rlookup_table[dev_data->devid];
347 dev_data->iommu_v2 = iommu->is_iommu_v2;
348 }
349
350 dev_iommu_priv_set(dev, dev_data);
351
352 return 0;
353}
354
355static void iommu_ignore_device(struct device *dev)
356{
357 int devid;
358
359 devid = get_device_id(dev);
360 if (devid < 0)
361 return;
362
363 amd_iommu_rlookup_table[devid] = NULL;
364 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
365
366 setup_aliases(dev);
367}
368
369static void amd_iommu_uninit_device(struct device *dev)
370{
371 struct iommu_dev_data *dev_data;
372
373 dev_data = dev_iommu_priv_get(dev);
374 if (!dev_data)
375 return;
376
377 if (dev_data->domain)
378 detach_device(dev);
379
380 dev_iommu_priv_set(dev, NULL);
381
	/*
	 * We keep dev_data around for unplugged devices and reuse it when the
	 * device is re-plugged - not doing so would introduce a ton of races.
	 */
386}
387
/****************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************/
394static void dump_dte_entry(u16 devid)
395{
396 int i;
397
398 for (i = 0; i < 4; ++i)
399 pr_err("DTE[%d]: %016llx\n", i,
400 amd_iommu_dev_table[devid].data[i]);
401}
402
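/* Dump the four command words of the command that caused an ILLEGAL_COMMAND_ERROR */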
403static void dump_command(unsigned long phys_addr)
404{
405 struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
406 int i;
407
408 for (i = 0; i < 4; ++i)
409 pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
410}
411
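/* Log an RMP_HW_ERROR event, rate-limited per device where possible */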
412static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
413{
414 struct iommu_dev_data *dev_data = NULL;
415 int devid, vmg_tag, flags;
416 struct pci_dev *pdev;
417 u64 spa;
418
419 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
420 vmg_tag = (event[1]) & 0xFFFF;
421 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
422 spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
423
424 pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
425 devid & 0xff);
426 if (pdev)
427 dev_data = dev_iommu_priv_get(&pdev->dev);
428
429 if (dev_data) {
430 if (__ratelimit(&dev_data->rs)) {
431 pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
432 vmg_tag, spa, flags);
433 }
434 } else {
435 pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
436 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
437 vmg_tag, spa, flags);
438 }
439
440 if (pdev)
441 pci_dev_put(pdev);
442}
443
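/* Log an RMP_PAGE_FAULT event, rate-limited per device where possible */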
444static void amd_iommu_report_rmp_fault(volatile u32 *event)
445{
446 struct iommu_dev_data *dev_data = NULL;
447 int devid, flags_rmp, vmg_tag, flags;
448 struct pci_dev *pdev;
449 u64 gpa;
450
451 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
452 flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
453 vmg_tag = (event[1]) & 0xFFFF;
454 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
455 gpa = ((u64)event[3] << 32) | event[2];
456
457 pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
458 devid & 0xff);
459 if (pdev)
460 dev_data = dev_iommu_priv_get(&pdev->dev);
461
462 if (dev_data) {
463 if (__ratelimit(&dev_data->rs)) {
464 pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
465 vmg_tag, gpa, flags_rmp, flags);
466 }
467 } else {
468 pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
469 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
470 vmg_tag, gpa, flags_rmp, flags);
471 }
472
473 if (pdev)
474 pci_dev_put(pdev);
475}
476
477#define IS_IOMMU_MEM_TRANSACTION(flags) \
478 (((flags) & EVENT_FLAG_I) == 0)
479
480#define IS_WRITE_REQUEST(flags) \
481 ((flags) & EVENT_FLAG_RW)
482
483static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
484 u64 address, int flags)
485{
486 struct iommu_dev_data *dev_data = NULL;
487 struct pci_dev *pdev;
488
489 pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
490 devid & 0xff);
491 if (pdev)
492 dev_data = dev_iommu_priv_get(&pdev->dev);
493
494 if (dev_data) {
		/*
		 * If this is a DMA fault (for which the I(nterrupt) bit will
		 * be unset), allow report_iommu_fault() to prevent logging it.
		 */
500 if (IS_IOMMU_MEM_TRANSACTION(flags)) {
501 if (!report_iommu_fault(&dev_data->domain->domain,
502 &pdev->dev, address,
503 IS_WRITE_REQUEST(flags) ?
504 IOMMU_FAULT_WRITE :
505 IOMMU_FAULT_READ))
506 goto out;
507 }
508
509 if (__ratelimit(&dev_data->rs)) {
510 pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
511 domain_id, address, flags);
512 }
513 } else {
514 pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
515 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
516 domain_id, address, flags);
517 }
518
519out:
520 if (pdev)
521 pci_dev_put(pdev);
522}
523
524static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
525{
526 struct device *dev = iommu->iommu.dev;
527 int type, devid, flags, tag;
528 volatile u32 *event = __evt;
529 int count = 0;
530 u64 address;
531 u32 pasid;
532
533retry:
534 type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
535 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
536 pasid = (event[0] & EVENT_DOMID_MASK_HI) |
537 (event[1] & EVENT_DOMID_MASK_LO);
538 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
539 address = (u64)(((u64)event[3]) << 32) | event[2];
540
541 if (type == 0) {
		/* Event entry not written by hardware yet - poll for it */
543 if (++count == LOOP_TIMEOUT) {
544 pr_err("No event written to event log\n");
545 return;
546 }
547 udelay(1);
548 goto retry;
549 }
550
551 if (type == EVENT_TYPE_IO_FAULT) {
552 amd_iommu_report_page_fault(devid, pasid, address, flags);
553 return;
554 }
555
556 switch (type) {
557 case EVENT_TYPE_ILL_DEV:
558 dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
559 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
560 pasid, address, flags);
561 dump_dte_entry(devid);
562 break;
563 case EVENT_TYPE_DEV_TAB_ERR:
564 dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
565 "address=0x%llx flags=0x%04x]\n",
566 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
567 address, flags);
568 break;
569 case EVENT_TYPE_PAGE_TAB_ERR:
570 dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
571 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
572 pasid, address, flags);
573 break;
574 case EVENT_TYPE_ILL_CMD:
575 dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
576 dump_command(address);
577 break;
578 case EVENT_TYPE_CMD_HARD_ERR:
579 dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
580 address, flags);
581 break;
582 case EVENT_TYPE_IOTLB_INV_TO:
583 dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n",
584 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
585 address);
586 break;
587 case EVENT_TYPE_INV_DEV_REQ:
588 dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
589 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
590 pasid, address, flags);
591 break;
592 case EVENT_TYPE_RMP_FAULT:
593 amd_iommu_report_rmp_fault(event);
594 break;
595 case EVENT_TYPE_RMP_HW_ERR:
596 amd_iommu_report_rmp_hw_error(event);
597 break;
598 case EVENT_TYPE_INV_PPR_REQ:
599 pasid = PPR_PASID(*((u64 *)__evt));
600 tag = event[1] & 0x03FF;
601 dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
602 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
603 pasid, address, flags, tag);
604 break;
605 default:
		dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x]\n",
			event[0], event[1], event[2], event[3]);
608 }
609
610 memset(__evt, 0, 4 * sizeof(u32));
611}
612
613static void iommu_poll_events(struct amd_iommu *iommu)
614{
615 u32 head, tail;
616
617 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
618 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
619
620 while (head != tail) {
621 iommu_print_event(iommu, iommu->evt_buf + head);
622 head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
623 }
624
625 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
626}
627
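/* Forward a PPR fault log entry to the registered PPR notifier chain */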
628static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
629{
630 struct amd_iommu_fault fault;
631
632 if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
633 pr_err_ratelimited("Unknown PPR request received\n");
634 return;
635 }
636
637 fault.address = raw[1];
638 fault.pasid = PPR_PASID(raw[0]);
639 fault.device_id = PPR_DEVID(raw[0]);
640 fault.tag = PPR_TAG(raw[0]);
641 fault.flags = PPR_FLAGS(raw[0]);
642
643 atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
644}
645
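/* Drain the hardware PPR log ring buffer and handle each entry */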
646static void iommu_poll_ppr_log(struct amd_iommu *iommu)
647{
648 u32 head, tail;
649
650 if (iommu->ppr_log == NULL)
651 return;
652
653 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
654 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
655
656 while (head != tail) {
657 volatile u64 *raw;
658 u64 entry[2];
659 int i;
660
661 raw = (u64 *)(iommu->ppr_log + head);
662
		/*
		 * Hardware bug: the interrupt may arrive before the entry is
		 * written to memory. If this happens we need to wait for the
		 * entry to show up.
		 */
668 for (i = 0; i < LOOP_TIMEOUT; ++i) {
669 if (PPR_REQ_TYPE(raw[0]) != 0)
670 break;
671 udelay(1);
672 }
673
		/* Avoid memcpy function-call overhead */
675 entry[0] = raw[0];
676 entry[1] = raw[1];
677
		/*
		 * To detect the hardware bug we need to clear the entry
		 * back to zero.
		 */
682 raw[0] = raw[1] = 0UL;
683
684
685 head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
686 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
687
688
689 iommu_handle_ppr_entry(iommu, entry);
690
691
692 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
693 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
694 }
695}
696
697#ifdef CONFIG_IRQ_REMAP
698static int (*iommu_ga_log_notifier)(u32);
699
700int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
701{
702 iommu_ga_log_notifier = notifier;
703
704 return 0;
705}
706EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
707
708static void iommu_poll_ga_log(struct amd_iommu *iommu)
709{
710 u32 head, tail, cnt = 0;
711
712 if (iommu->ga_log == NULL)
713 return;
714
715 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
716 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
717
718 while (head != tail) {
719 volatile u64 *raw;
720 u64 log_entry;
721
722 raw = (u64 *)(iommu->ga_log + head);
723 cnt++;
724
725
726 log_entry = *raw;
727
728
729 head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
730 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
731
732
733 switch (GA_REQ_TYPE(log_entry)) {
734 case GA_GUEST_NR:
735 if (!iommu_ga_log_notifier)
736 break;
737
738 pr_debug("%s: devid=%#x, ga_tag=%#x\n",
739 __func__, GA_DEVID(log_entry),
740 GA_TAG(log_entry));
741
742 if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
743 pr_err("GA log notifier failed.\n");
744 break;
745 default:
746 break;
747 }
748 }
749}
750
751static void
752amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
753{
754 if (!irq_remapping_enabled || !dev_is_pci(dev) ||
755 pci_dev_has_special_msi_domain(to_pci_dev(dev)))
756 return;
757
758 dev_set_msi_domain(dev, iommu->msi_domain);
759}
760
761#else
762static inline void
763amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
764#endif
765
766#define AMD_IOMMU_INT_MASK \
767 (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
768 MMIO_STATUS_EVT_INT_MASK | \
769 MMIO_STATUS_PPR_INT_MASK | \
770 MMIO_STATUS_GALOG_INT_MASK)
771
772irqreturn_t amd_iommu_int_thread(int irq, void *data)
773{
774 struct amd_iommu *iommu = (struct amd_iommu *) data;
775 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
776
777 while (status & AMD_IOMMU_INT_MASK) {
		/* Acknowledge the interrupt sources by clearing the status bits */
779 writel(AMD_IOMMU_INT_MASK,
780 iommu->mmio_base + MMIO_STATUS_OFFSET);
781
782 if (status & MMIO_STATUS_EVT_INT_MASK) {
783 pr_devel("Processing IOMMU Event Log\n");
784 iommu_poll_events(iommu);
785 }
786
787 if (status & MMIO_STATUS_PPR_INT_MASK) {
788 pr_devel("Processing IOMMU PPR Log\n");
789 iommu_poll_ppr_log(iommu);
790 }
791
792#ifdef CONFIG_IRQ_REMAP
793 if (status & MMIO_STATUS_GALOG_INT_MASK) {
794 pr_devel("Processing IOMMU GA Log\n");
795 iommu_poll_ga_log(iommu);
796 }
797#endif
798
799 if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
800 pr_info_ratelimited("IOMMU event log overflow\n");
801 amd_iommu_restart_event_logging(iommu);
802 }
803
		/*
		 * Hardware bug: clearing an interrupt bit in the status
		 * register may race with the hardware setting the same bit
		 * again. If that happens the bit stays set and no further
		 * interrupt is raised for it.
		 *
		 * Workaround: read back the status register and check whether
		 * the interrupt bits are really cleared. If not, go through
		 * the handler loop again and re-clear them.
		 */
817 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
818 }
819 return IRQ_HANDLED;
820}
821
822irqreturn_t amd_iommu_int_handler(int irq, void *data)
823{
824 return IRQ_WAKE_THREAD;
825}

/****************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************/
833static int wait_on_sem(struct amd_iommu *iommu, u64 data)
834{
835 int i = 0;
836
837 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
838 udelay(1);
839 i += 1;
840 }
841
842 if (i == LOOP_TIMEOUT) {
843 pr_alert("Completion-Wait loop timed out\n");
844 return -EIO;
845 }
846
847 return 0;
848}
849
850static void copy_cmd_to_buffer(struct amd_iommu *iommu,
851 struct iommu_cmd *cmd)
852{
853 u8 *target;
854 u32 tail;
855
	/* Copy command to buffer */
857 tail = iommu->cmd_buf_tail;
858 target = iommu->cmd_buf + tail;
859 memcpy(target, cmd, sizeof(*cmd));
860
861 tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
862 iommu->cmd_buf_tail = tail;
863
	/* Tell the IOMMU about it */
865 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
866}
867
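/*
 * Build a COMPLETION_WAIT command which makes the IOMMU write @data to the
 * command semaphore once all previous commands have completed.
 */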
868static void build_completion_wait(struct iommu_cmd *cmd,
869 struct amd_iommu *iommu,
870 u64 data)
871{
872 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
873
874 memset(cmd, 0, sizeof(*cmd));
875 cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
876 cmd->data[1] = upper_32_bits(paddr);
877 cmd->data[2] = data;
878 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
879}
880
881static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
882{
883 memset(cmd, 0, sizeof(*cmd));
884 cmd->data[0] = devid;
885 CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
886}
887
/*
 * Build an invalidation address which is suitable for one page or multiple
 * pages. For more than one page the size bit (S) is set and the low bits
 * encode the range.
 */
892static inline u64 build_inv_address(u64 address, size_t size)
893{
894 u64 pages, end, msb_diff;
895
896 pages = iommu_num_pages(address, size, PAGE_SIZE);
897
898 if (pages == 1)
899 return address & PAGE_MASK;
900
901 end = address + size - 1;
902
	/*
	 * msb_diff is the index of the most significant bit that differs
	 * between the start and the end address.
	 */
907 msb_diff = fls64(end ^ address) - 1;
908
	/*
	 * Bits 63:52 are sign extended. If for some reason bit 51 differs
	 * between start and end, invalidate everything.
	 */
913 if (unlikely(msb_diff > 51)) {
914 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
915 } else {
		/*
		 * The msb bit must be clear on the address - set all lower
		 * bits instead.
		 */
920 address |= (1ull << msb_diff) - 1;
921 }
922
923
924 address &= PAGE_MASK;
925
	/* Set the size bit - we flush more than one 4kb page */
927 return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
928}
929
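/* Build an INVALIDATE_IOMMU_PAGES command for an address range in a domain */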
930static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
931 size_t size, u16 domid, int pde)
932{
933 u64 inv_address = build_inv_address(address, size);
934
935 memset(cmd, 0, sizeof(*cmd));
936 cmd->data[1] |= domid;
937 cmd->data[2] = lower_32_bits(inv_address);
938 cmd->data[3] = upper_32_bits(inv_address);
939 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
940 if (pde)
941 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
942}
943
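/* Build an INVALIDATE_IOTLB_PAGES command to flush the ATS TLB of a device */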
944static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
945 u64 address, size_t size)
946{
947 u64 inv_address = build_inv_address(address, size);
948
949 memset(cmd, 0, sizeof(*cmd));
950 cmd->data[0] = devid;
951 cmd->data[0] |= (qdep & 0xff) << 24;
952 cmd->data[1] = devid;
953 cmd->data[2] = lower_32_bits(inv_address);
954 cmd->data[3] = upper_32_bits(inv_address);
955 CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
956}
957
958static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
959 u64 address, bool size)
960{
961 memset(cmd, 0, sizeof(*cmd));
962
963 address &= ~(0xfffULL);
964
965 cmd->data[0] = pasid;
966 cmd->data[1] = domid;
967 cmd->data[2] = lower_32_bits(address);
968 cmd->data[3] = upper_32_bits(address);
969 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
970 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
971 if (size)
972 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
973 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
974}
975
976static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
977 int qdep, u64 address, bool size)
978{
979 memset(cmd, 0, sizeof(*cmd));
980
981 address &= ~(0xfffULL);
982
983 cmd->data[0] = devid;
984 cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
985 cmd->data[0] |= (qdep & 0xff) << 24;
986 cmd->data[1] = devid;
987 cmd->data[1] |= (pasid & 0xff) << 16;
988 cmd->data[2] = lower_32_bits(address);
989 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
990 cmd->data[3] = upper_32_bits(address);
991 if (size)
992 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
993 CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
994}
995
996static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
997 int status, int tag, bool gn)
998{
999 memset(cmd, 0, sizeof(*cmd));
1000
1001 cmd->data[0] = devid;
1002 if (gn) {
1003 cmd->data[1] = pasid;
1004 cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
1005 }
1006 cmd->data[3] = tag & 0x1ff;
1007 cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
1008
1009 CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
1010}
1011
1012static void build_inv_all(struct iommu_cmd *cmd)
1013{
1014 memset(cmd, 0, sizeof(*cmd));
1015 CMD_SET_TYPE(cmd, CMD_INV_ALL);
1016}
1017
1018static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
1019{
1020 memset(cmd, 0, sizeof(*cmd));
1021 cmd->data[0] = devid;
1022 CMD_SET_TYPE(cmd, CMD_INV_IRT);
1023}
1024
/*
 * Write the command to the IOMMU's command buffer and inform the hardware
 * about the new command.
 */
1029static int __iommu_queue_command_sync(struct amd_iommu *iommu,
1030 struct iommu_cmd *cmd,
1031 bool sync)
1032{
1033 unsigned int count = 0;
1034 u32 left, next_tail;
1035
1036 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1037again:
1038 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
1039
1040 if (left <= 0x20) {
		/* Skip udelay() the first time around */
1042 if (count++) {
1043 if (count == LOOP_TIMEOUT) {
1044 pr_err("Command buffer timeout\n");
1045 return -EIO;
1046 }
1047
1048 udelay(1);
1049 }
1050
1051
1052 iommu->cmd_buf_head = readl(iommu->mmio_base +
1053 MMIO_CMD_HEAD_OFFSET);
1054
1055 goto again;
1056 }
1057
1058 copy_cmd_to_buffer(iommu, cmd);
1059
	/* Note whether a completion-wait must follow this command */
1061 iommu->need_sync = sync;
1062
1063 return 0;
1064}
1065
1066static int iommu_queue_command_sync(struct amd_iommu *iommu,
1067 struct iommu_cmd *cmd,
1068 bool sync)
1069{
1070 unsigned long flags;
1071 int ret;
1072
1073 raw_spin_lock_irqsave(&iommu->lock, flags);
1074 ret = __iommu_queue_command_sync(iommu, cmd, sync);
1075 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1076
1077 return ret;
1078}
1079
1080static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1081{
1082 return iommu_queue_command_sync(iommu, cmd, true);
1083}
1084
/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
1089static int iommu_completion_wait(struct amd_iommu *iommu)
1090{
1091 struct iommu_cmd cmd;
1092 unsigned long flags;
1093 int ret;
1094 u64 data;
1095
1096 if (!iommu->need_sync)
1097 return 0;
1098
1099 raw_spin_lock_irqsave(&iommu->lock, flags);
1100
1101 data = ++iommu->cmd_sem_val;
1102 build_completion_wait(&cmd, iommu, data);
1103
1104 ret = __iommu_queue_command_sync(iommu, &cmd, false);
1105 if (ret)
1106 goto out_unlock;
1107
1108 ret = wait_on_sem(iommu, data);
1109
1110out_unlock:
1111 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1112
1113 return ret;
1114}
1115
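/* Invalidate the device table entry of a single device */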
1116static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1117{
1118 struct iommu_cmd cmd;
1119
1120 build_inv_dte(&cmd, devid);
1121
1122 return iommu_queue_command(iommu, &cmd);
1123}
1124
1125static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
1126{
1127 u32 devid;
1128
1129 for (devid = 0; devid <= 0xffff; ++devid)
1130 iommu_flush_dte(iommu, devid);
1131
1132 iommu_completion_wait(iommu);
1133}
1134
1135
/*
 * Flush the IOMMU TLB for all possible domain IDs. Only used for full
 * cache flushes, e.g. during initialization or resume.
 */
1139static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
1140{
1141 u32 dom_id;
1142
1143 for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
1144 struct iommu_cmd cmd;
1145 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1146 dom_id, 1);
1147 iommu_queue_command(iommu, &cmd);
1148 }
1149
1150 iommu_completion_wait(iommu);
1151}
1152
1153static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1154{
1155 struct iommu_cmd cmd;
1156
1157 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1158 dom_id, 1);
1159 iommu_queue_command(iommu, &cmd);
1160
1161 iommu_completion_wait(iommu);
1162}
1163
1164static void amd_iommu_flush_all(struct amd_iommu *iommu)
1165{
1166 struct iommu_cmd cmd;
1167
1168 build_inv_all(&cmd);
1169
1170 iommu_queue_command(iommu, &cmd);
1171 iommu_completion_wait(iommu);
1172}
1173
1174static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1175{
1176 struct iommu_cmd cmd;
1177
1178 build_inv_irt(&cmd, devid);
1179
1180 iommu_queue_command(iommu, &cmd);
1181}
1182
1183static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
1184{
1185 u32 devid;
1186
	for (devid = 0; devid < MAX_DEV_TABLE_ENTRIES; devid++)
1188 iommu_flush_irt(iommu, devid);
1189
1190 iommu_completion_wait(iommu);
1191}
1192
1193void iommu_flush_all_caches(struct amd_iommu *iommu)
1194{
1195 if (iommu_feature(iommu, FEATURE_IA)) {
1196 amd_iommu_flush_all(iommu);
1197 } else {
1198 amd_iommu_flush_dte_all(iommu);
1199 amd_iommu_flush_irt_all(iommu);
1200 amd_iommu_flush_tlb_all(iommu);
1201 }
1202}
1203
/*
 * Command send function for flushing the on-device TLB (ATS) of a device
 */
1207static int device_flush_iotlb(struct iommu_dev_data *dev_data,
1208 u64 address, size_t size)
1209{
1210 struct amd_iommu *iommu;
1211 struct iommu_cmd cmd;
1212 int qdep;
1213
1214 qdep = dev_data->ats.qdep;
1215 iommu = amd_iommu_rlookup_table[dev_data->devid];
1216
1217 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
1218
1219 return iommu_queue_command(iommu, &cmd);
1220}
1221
1222static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
1223{
1224 struct amd_iommu *iommu = data;
1225
1226 return iommu_flush_dte(iommu, alias);
1227}
1228
/*
 * Command send function for invalidating a device table entry
 */
1232static int device_flush_dte(struct iommu_dev_data *dev_data)
1233{
1234 struct amd_iommu *iommu;
1235 u16 alias;
1236 int ret;
1237
1238 iommu = amd_iommu_rlookup_table[dev_data->devid];
1239
1240 if (dev_data->pdev)
1241 ret = pci_for_each_dma_alias(dev_data->pdev,
1242 device_flush_dte_alias, iommu);
1243 else
1244 ret = iommu_flush_dte(iommu, dev_data->devid);
1245 if (ret)
1246 return ret;
1247
1248 alias = amd_iommu_alias_table[dev_data->devid];
1249 if (alias != dev_data->devid) {
1250 ret = iommu_flush_dte(iommu, alias);
1251 if (ret)
1252 return ret;
1253 }
1254
1255 if (dev_data->ats.enabled)
1256 ret = device_flush_iotlb(dev_data, 0, ~0UL);
1257
1258 return ret;
1259}
1260
/*
 * TLB invalidation function which is called from the mapping functions.
 * It flushes the given range on every IOMMU the domain is active on and
 * on the IOTLB of every attached device with ATS enabled.
 */
1266static void __domain_flush_pages(struct protection_domain *domain,
1267 u64 address, size_t size, int pde)
1268{
1269 struct iommu_dev_data *dev_data;
1270 struct iommu_cmd cmd;
1271 int ret = 0, i;
1272
1273 build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
1274
1275 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
1276 if (!domain->dev_iommu[i])
1277 continue;
1278
		/*
		 * Devices of this domain are behind this IOMMU
		 * We need a TLB flush
		 */
1283 ret |= iommu_queue_command(amd_iommus[i], &cmd);
1284 }
1285
1286 list_for_each_entry(dev_data, &domain->dev_list, list) {
1287
1288 if (!dev_data->ats.enabled)
1289 continue;
1290
1291 ret |= device_flush_iotlb(dev_data, address, size);
1292 }
1293
1294 WARN_ON(ret);
1295}
1296
1297static void domain_flush_pages(struct protection_domain *domain,
1298 u64 address, size_t size, int pde)
1299{
1300 if (likely(!amd_iommu_np_cache)) {
1301 __domain_flush_pages(domain, address, size, pde);
1302 return;
1303 }
1304
	/*
	 * When NpCache is set we most likely run virtualized behind a vIOMMU.
	 * In that case flushes of unaligned ranges are expensive for the
	 * hypervisor, so split the range into naturally aligned chunks and
	 * flush those one by one. Each iteration flushes the largest
	 * naturally aligned power-of-two region that starts at the current
	 * address and still fits into the remaining size.
	 */
1315 while (size != 0) {
1316 int addr_alignment = __ffs(address);
1317 int size_alignment = __fls(size);
1318 int min_alignment;
1319 size_t flush_size;
1320
1321
		/*
		 * size is always non-zero, but address might be zero, in
		 * which case __ffs(address) is undefined - fall back to the
		 * size-based alignment in that case.
		 */
1327 if (likely((unsigned long)address != 0))
1328 min_alignment = min(addr_alignment, size_alignment);
1329 else
1330 min_alignment = size_alignment;
1331
1332 flush_size = 1ul << min_alignment;
1333
1334 __domain_flush_pages(domain, address, flush_size, pde);
1335 address += flush_size;
1336 size -= flush_size;
1337 }
1338}
1339
/* Flush the whole IO/TLB for a given protection domain - including PDE */
1341void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
1342{
1343 domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
1344}
1345
1346void amd_iommu_domain_flush_complete(struct protection_domain *domain)
1347{
1348 int i;
1349
1350 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
1351 if (domain && !domain->dev_iommu[i])
1352 continue;
1353
1354
1355
1356
1357
1358 iommu_completion_wait(amd_iommus[i]);
1359 }
1360}
1361
1362
1363static void domain_flush_np_cache(struct protection_domain *domain,
1364 dma_addr_t iova, size_t size)
1365{
1366 if (unlikely(amd_iommu_np_cache)) {
1367 unsigned long flags;
1368
1369 spin_lock_irqsave(&domain->lock, flags);
1370 domain_flush_pages(domain, iova, size, 1);
1371 amd_iommu_domain_flush_complete(domain);
1372 spin_unlock_irqrestore(&domain->lock, flags);
1373 }
1374}
1375
/*
 * This function flushes the DTEs for all devices in domain
 */
1380static void domain_flush_devices(struct protection_domain *domain)
1381{
1382 struct iommu_dev_data *dev_data;
1383
1384 list_for_each_entry(dev_data, &domain->dev_list, list)
1385 device_flush_dte(dev_data);
1386}
1387
/****************************************************************
 *
 * The next functions belong to protection domain allocation. A protection
 * domain groups devices that share a set of page tables mapping their DMA
 * address space.
 *
 ****************************************************************/
1398static u16 domain_id_alloc(void)
1399{
1400 int id;
1401
1402 spin_lock(&pd_bitmap_lock);
1403 id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1404 BUG_ON(id == 0);
1405 if (id > 0 && id < MAX_DOMAIN_ID)
1406 __set_bit(id, amd_iommu_pd_alloc_bitmap);
1407 else
1408 id = 0;
1409 spin_unlock(&pd_bitmap_lock);
1410
1411 return id;
1412}
1413
1414static void domain_id_free(int id)
1415{
1416 spin_lock(&pd_bitmap_lock);
1417 if (id > 0 && id < MAX_DOMAIN_ID)
1418 __clear_bit(id, amd_iommu_pd_alloc_bitmap);
1419 spin_unlock(&pd_bitmap_lock);
1420}
1421
1422static void free_gcr3_tbl_level1(u64 *tbl)
1423{
1424 u64 *ptr;
1425 int i;
1426
1427 for (i = 0; i < 512; ++i) {
1428 if (!(tbl[i] & GCR3_VALID))
1429 continue;
1430
1431 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1432
1433 free_page((unsigned long)ptr);
1434 }
1435}
1436
1437static void free_gcr3_tbl_level2(u64 *tbl)
1438{
1439 u64 *ptr;
1440 int i;
1441
1442 for (i = 0; i < 512; ++i) {
1443 if (!(tbl[i] & GCR3_VALID))
1444 continue;
1445
1446 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1447
1448 free_gcr3_tbl_level1(ptr);
1449 }
1450}
1451
1452static void free_gcr3_table(struct protection_domain *domain)
1453{
1454 if (domain->glx == 2)
1455 free_gcr3_tbl_level2(domain->gcr3_tbl);
1456 else if (domain->glx == 1)
1457 free_gcr3_tbl_level1(domain->gcr3_tbl);
1458 else
1459 BUG_ON(domain->glx != 0);
1460
1461 free_page((unsigned long)domain->gcr3_tbl);
1462}
1463
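/*
 * Write the device table entry for @devid so that it points to the given
 * protection domain, optionally enabling ATS (IOTLB) and PPR support.
 */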
1464static void set_dte_entry(u16 devid, struct protection_domain *domain,
1465 bool ats, bool ppr)
1466{
1467 u64 pte_root = 0;
1468 u64 flags = 0;
1469 u32 old_domid;
1470
1471 if (domain->iop.mode != PAGE_MODE_NONE)
1472 pte_root = iommu_virt_to_phys(domain->iop.root);
1473
1474 pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
1475 << DEV_ENTRY_MODE_SHIFT;
1476 pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
1477
1478 flags = amd_iommu_dev_table[devid].data[1];
1479
1480 if (ats)
1481 flags |= DTE_FLAG_IOTLB;
1482
1483 if (ppr) {
1484 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1485
1486 if (iommu_feature(iommu, FEATURE_EPHSUP))
1487 pte_root |= 1ULL << DEV_ENTRY_PPR;
1488 }
1489
1490 if (domain->flags & PD_IOMMUV2_MASK) {
1491 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
1492 u64 glx = domain->glx;
1493 u64 tmp;
1494
1495 pte_root |= DTE_FLAG_GV;
1496 pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
1497
		/* First mask out possible old values for GCR3 table */
1499 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1500 flags &= ~tmp;
1501
1502 tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1503 flags &= ~tmp;
1504
		/* Encode GCR3 table into DTE */
1506 tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
1507 pte_root |= tmp;
1508
1509 tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
1510 flags |= tmp;
1511
1512 tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
1513 flags |= tmp;
1514 }
1515
1516 flags &= ~DEV_DOMID_MASK;
1517 flags |= domain->id;
1518
1519 old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
1520 amd_iommu_dev_table[devid].data[1] = flags;
1521 amd_iommu_dev_table[devid].data[0] = pte_root;
1522
	/*
	 * A kdump kernel might be replacing a domain ID that was copied from
	 * the previous kernel - if so, flush the translation cache entries
	 * for the old domain ID that is being overwritten.
	 */
1528 if (old_domid) {
1529 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1530
1531 amd_iommu_flush_tlb_domid(iommu, old_domid);
1532 }
1533}
1534
1535static void clear_dte_entry(u16 devid)
1536{
	/* Remove the translation information from the device table entry */
1538 amd_iommu_dev_table[devid].data[0] = DTE_FLAG_V | DTE_FLAG_TV;
1539 amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
1540
1541 amd_iommu_apply_erratum_63(devid);
1542}
1543
1544static void do_attach(struct iommu_dev_data *dev_data,
1545 struct protection_domain *domain)
1546{
1547 struct amd_iommu *iommu;
1548 bool ats;
1549
1550 iommu = amd_iommu_rlookup_table[dev_data->devid];
1551 ats = dev_data->ats.enabled;
1552
1553
1554 dev_data->domain = domain;
1555 list_add(&dev_data->list, &domain->dev_list);
1556
1557
1558 domain->dev_iommu[iommu->index] += 1;
1559 domain->dev_cnt += 1;
1560
1561
1562 set_dte_entry(dev_data->devid, domain,
1563 ats, dev_data->iommu_v2);
1564 clone_aliases(dev_data->pdev);
1565
1566 device_flush_dte(dev_data);
1567}
1568
1569static void do_detach(struct iommu_dev_data *dev_data)
1570{
1571 struct protection_domain *domain = dev_data->domain;
1572 struct amd_iommu *iommu;
1573
1574 iommu = amd_iommu_rlookup_table[dev_data->devid];
1575
1576
1577 dev_data->domain = NULL;
1578 list_del(&dev_data->list);
1579 clear_dte_entry(dev_data->devid);
1580 clone_aliases(dev_data->pdev);
1581
1582
1583 device_flush_dte(dev_data);
1584
1585
1586 amd_iommu_domain_flush_tlb_pde(domain);
1587
1588
1589 amd_iommu_domain_flush_complete(domain);
1590
1591
1592 domain->dev_iommu[iommu->index] -= 1;
1593 domain->dev_cnt -= 1;
1594}
1595
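/* Disable ATS, PRI and PASID on a device that was used in IOMMUv2 mode */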
1596static void pdev_iommuv2_disable(struct pci_dev *pdev)
1597{
1598 pci_disable_ats(pdev);
1599 pci_disable_pri(pdev);
1600 pci_disable_pasid(pdev);
1601}
1602
1603static int pdev_iommuv2_enable(struct pci_dev *pdev)
1604{
1605 int ret;
1606
1607
1608 ret = pci_enable_pasid(pdev, 0);
1609 if (ret)
1610 goto out_err;
1611
1612
1613 ret = pci_reset_pri(pdev);
1614 if (ret)
1615 goto out_err;
1616
	/* Enable PRI; the number of outstanding page requests is hardcoded */
1619 ret = pci_enable_pri(pdev, 32);
1620 if (ret)
1621 goto out_err;
1622
1623 ret = pci_enable_ats(pdev, PAGE_SHIFT);
1624 if (ret)
1625 goto out_err;
1626
1627 return 0;
1628
1629out_err:
1630 pci_disable_pri(pdev);
1631 pci_disable_pasid(pdev);
1632
1633 return ret;
1634}
1635
1636
/*
 * If a device is not yet associated with a domain, this function makes the
 * device visible in the domain.
 */
1640static int attach_device(struct device *dev,
1641 struct protection_domain *domain)
1642{
1643 struct iommu_dev_data *dev_data;
1644 struct pci_dev *pdev;
1645 unsigned long flags;
1646 int ret;
1647
1648 spin_lock_irqsave(&domain->lock, flags);
1649
1650 dev_data = dev_iommu_priv_get(dev);
1651
1652 spin_lock(&dev_data->lock);
1653
1654 ret = -EBUSY;
1655 if (dev_data->domain != NULL)
1656 goto out;
1657
1658 if (!dev_is_pci(dev))
1659 goto skip_ats_check;
1660
1661 pdev = to_pci_dev(dev);
1662 if (domain->flags & PD_IOMMUV2_MASK) {
1663 struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
1664
1665 ret = -EINVAL;
1666 if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
1667 goto out;
1668
1669 if (dev_data->iommu_v2) {
1670 if (pdev_iommuv2_enable(pdev) != 0)
1671 goto out;
1672
1673 dev_data->ats.enabled = true;
1674 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
1675 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
1676 }
1677 } else if (amd_iommu_iotlb_sup &&
1678 pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
1679 dev_data->ats.enabled = true;
1680 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
1681 }
1682
1683skip_ats_check:
1684 ret = 0;
1685
1686 do_attach(dev_data, domain);
1687
	/*
	 * We might boot into a crash-kernel here. The crashed kernel
	 * left the caches in the IOMMU dirty. So we have to flush
	 * everything here before the new domain is used.
	 */
1693 amd_iommu_domain_flush_tlb_pde(domain);
1694
1695 amd_iommu_domain_flush_complete(domain);
1696
1697out:
1698 spin_unlock(&dev_data->lock);
1699
1700 spin_unlock_irqrestore(&domain->lock, flags);
1701
1702 return ret;
1703}
1704
/*
 * Remove a device from its protection domain.
 */
1708static void detach_device(struct device *dev)
1709{
1710 struct protection_domain *domain;
1711 struct iommu_dev_data *dev_data;
1712 unsigned long flags;
1713
1714 dev_data = dev_iommu_priv_get(dev);
1715 domain = dev_data->domain;
1716
1717 spin_lock_irqsave(&domain->lock, flags);
1718
1719 spin_lock(&dev_data->lock);
1720
	/*
	 * First check if the device is still attached. It might already
	 * have been detached from its domain, e.g. via the alias handling
	 * of another device.
	 */
1727 if (WARN_ON(!dev_data->domain))
1728 goto out;
1729
1730 do_detach(dev_data);
1731
1732 if (!dev_is_pci(dev))
1733 goto out;
1734
1735 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
1736 pdev_iommuv2_disable(to_pci_dev(dev));
1737 else if (dev_data->ats.enabled)
1738 pci_disable_ats(to_pci_dev(dev));
1739
1740 dev_data->ats.enabled = false;
1741
1742out:
1743 spin_unlock(&dev_data->lock);
1744
1745 spin_unlock_irqrestore(&domain->lock, flags);
1746}
1747
1748static struct iommu_device *amd_iommu_probe_device(struct device *dev)
1749{
1750 struct iommu_device *iommu_dev;
1751 struct amd_iommu *iommu;
1752 int ret, devid;
1753
1754 if (!check_device(dev))
1755 return ERR_PTR(-ENODEV);
1756
1757 devid = get_device_id(dev);
1758 iommu = amd_iommu_rlookup_table[devid];
1759
1760 if (dev_iommu_priv_get(dev))
1761 return &iommu->iommu;
1762
1763 ret = iommu_init_device(dev);
1764 if (ret) {
1765 if (ret != -ENOTSUPP)
1766 dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
1767 iommu_dev = ERR_PTR(ret);
1768 iommu_ignore_device(dev);
1769 } else {
1770 amd_iommu_set_pci_msi_domain(dev, iommu);
1771 iommu_dev = &iommu->iommu;
1772 }
1773
1774 iommu_completion_wait(iommu);
1775
1776 return iommu_dev;
1777}
1778
1779static void amd_iommu_probe_finalize(struct device *dev)
1780{
1781
1782 set_dma_ops(dev, NULL);
1783 iommu_setup_dma_ops(dev, 0, U64_MAX);
1784}
1785
1786static void amd_iommu_release_device(struct device *dev)
1787{
1788 int devid = get_device_id(dev);
1789 struct amd_iommu *iommu;
1790
1791 if (!check_device(dev))
1792 return;
1793
1794 iommu = amd_iommu_rlookup_table[devid];
1795
1796 amd_iommu_uninit_device(dev);
1797 iommu_completion_wait(iommu);
1798}
1799
1800static struct iommu_group *amd_iommu_device_group(struct device *dev)
1801{
1802 if (dev_is_pci(dev))
1803 return pci_device_group(dev);
1804
1805 return acpihid_device_group(dev);
1806}
1807
/*
 * Update the DTEs of all devices attached to the given protection domain so
 * that they point to the domain's current page table.
 */
1814static void update_device_table(struct protection_domain *domain)
1815{
1816 struct iommu_dev_data *dev_data;
1817
1818 list_for_each_entry(dev_data, &domain->dev_list, list) {
1819 set_dte_entry(dev_data->devid, domain,
1820 dev_data->ats.enabled, dev_data->iommu_v2);
1821 clone_aliases(dev_data->pdev);
1822 }
1823}
1824
1825void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
1826{
1827 update_device_table(domain);
1828 domain_flush_devices(domain);
1829}
1830
1831void amd_iommu_domain_update(struct protection_domain *domain)
1832{
1833
1834 amd_iommu_update_and_flush_device_table(domain);
1835
1836
1837 amd_iommu_domain_flush_tlb_pde(domain);
1838 amd_iommu_domain_flush_complete(domain);
1839}
1840
1841static void __init amd_iommu_init_dma_ops(void)
1842{
1843 swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
1844}
1845
1846int __init amd_iommu_init_api(void)
1847{
1848 int err;
1849
1850 amd_iommu_init_dma_ops();
1851
1852 err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
1853 if (err)
1854 return err;
1855#ifdef CONFIG_ARM_AMBA
1856 err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
1857 if (err)
1858 return err;
1859#endif
1860 err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
1861 if (err)
1862 return err;
1863
1864 return 0;
1865}
1866
/*****************************************************************************
 *
 * The following functions belong to the exported interface of AMD IOMMU:
 * the IOMMU-API implementation and the lower-level protection domain
 * handling it is built on.
 *
 *****************************************************************************/
1877static void cleanup_domain(struct protection_domain *domain)
1878{
1879 struct iommu_dev_data *entry;
1880 unsigned long flags;
1881
1882 spin_lock_irqsave(&domain->lock, flags);
1883
1884 while (!list_empty(&domain->dev_list)) {
1885 entry = list_first_entry(&domain->dev_list,
1886 struct iommu_dev_data, list);
1887 BUG_ON(!entry->domain);
1888 do_detach(entry);
1889 }
1890
1891 spin_unlock_irqrestore(&domain->lock, flags);
1892}
1893
1894static void protection_domain_free(struct protection_domain *domain)
1895{
1896 if (!domain)
1897 return;
1898
1899 if (domain->id)
1900 domain_id_free(domain->id);
1901
1902 if (domain->iop.pgtbl_cfg.tlb)
1903 free_io_pgtable_ops(&domain->iop.iop.ops);
1904
1905 kfree(domain);
1906}
1907
1908static int protection_domain_init_v1(struct protection_domain *domain, int mode)
1909{
1910 u64 *pt_root = NULL;
1911
1912 BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
1913
1914 spin_lock_init(&domain->lock);
1915 domain->id = domain_id_alloc();
1916 if (!domain->id)
1917 return -ENOMEM;
1918 INIT_LIST_HEAD(&domain->dev_list);
1919
1920 if (mode != PAGE_MODE_NONE) {
1921 pt_root = (void *)get_zeroed_page(GFP_KERNEL);
1922 if (!pt_root)
1923 return -ENOMEM;
1924 }
1925
1926 amd_iommu_domain_set_pgtable(domain, pt_root, mode);
1927
1928 return 0;
1929}
1930
1931static struct protection_domain *protection_domain_alloc(unsigned int type)
1932{
1933 struct io_pgtable_ops *pgtbl_ops;
1934 struct protection_domain *domain;
1935 int pgtable = amd_iommu_pgtable;
1936 int mode = DEFAULT_PGTABLE_LEVEL;
1937 int ret;
1938
1939 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
1940 if (!domain)
1941 return NULL;
1942
	/*
	 * Identity and unmanaged domains are always backed by the v1 page
	 * table format; identity domains additionally use PAGE_MODE_NONE.
	 */
1947 if (type == IOMMU_DOMAIN_IDENTITY) {
1948 pgtable = AMD_IOMMU_V1;
1949 mode = PAGE_MODE_NONE;
1950 } else if (type == IOMMU_DOMAIN_UNMANAGED) {
1951 pgtable = AMD_IOMMU_V1;
1952 }
1953
1954 switch (pgtable) {
1955 case AMD_IOMMU_V1:
1956 ret = protection_domain_init_v1(domain, mode);
1957 break;
1958 default:
1959 ret = -EINVAL;
1960 }
1961
1962 if (ret)
1963 goto out_err;
1964
1965 pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
1966 if (!pgtbl_ops)
1967 goto out_err;
1968
1969 return domain;
1970out_err:
1971 kfree(domain);
1972 return NULL;
1973}
1974
1975static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
1976{
1977 struct protection_domain *domain;
1978
1979 domain = protection_domain_alloc(type);
1980 if (!domain)
1981 return NULL;
1982
1983 domain->domain.geometry.aperture_start = 0;
1984 domain->domain.geometry.aperture_end = ~0ULL;
1985 domain->domain.geometry.force_aperture = true;
1986
1987 return &domain->domain;
1988}
1989
1990static void amd_iommu_domain_free(struct iommu_domain *dom)
1991{
1992 struct protection_domain *domain;
1993
1994 domain = to_pdomain(dom);
1995
1996 if (domain->dev_cnt > 0)
1997 cleanup_domain(domain);
1998
1999 BUG_ON(domain->dev_cnt != 0);
2000
2001 if (!dom)
2002 return;
2003
2004 if (domain->flags & PD_IOMMUV2_MASK)
2005 free_gcr3_table(domain);
2006
2007 protection_domain_free(domain);
2008}
2009
2010static void amd_iommu_detach_device(struct iommu_domain *dom,
2011 struct device *dev)
2012{
2013 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2014 int devid = get_device_id(dev);
2015 struct amd_iommu *iommu;
2016
2017 if (!check_device(dev))
2018 return;
2019
2020 if (dev_data->domain != NULL)
2021 detach_device(dev);
2022
2023 iommu = amd_iommu_rlookup_table[devid];
2024 if (!iommu)
2025 return;
2026
2027#ifdef CONFIG_IRQ_REMAP
2028 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2029 (dom->type == IOMMU_DOMAIN_UNMANAGED))
2030 dev_data->use_vapic = 0;
2031#endif
2032
2033 iommu_completion_wait(iommu);
2034}
2035
2036static int amd_iommu_attach_device(struct iommu_domain *dom,
2037 struct device *dev)
2038{
2039 struct protection_domain *domain = to_pdomain(dom);
2040 struct iommu_dev_data *dev_data;
2041 struct amd_iommu *iommu;
2042 int ret;
2043
2044 if (!check_device(dev))
2045 return -EINVAL;
2046
2047 dev_data = dev_iommu_priv_get(dev);
2048 dev_data->defer_attach = false;
2049
2050 iommu = amd_iommu_rlookup_table[dev_data->devid];
2051 if (!iommu)
2052 return -EINVAL;
2053
2054 if (dev_data->domain)
2055 detach_device(dev);
2056
2057 ret = attach_device(dev, domain);
2058
2059#ifdef CONFIG_IRQ_REMAP
2060 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
2061 if (dom->type == IOMMU_DOMAIN_UNMANAGED)
2062 dev_data->use_vapic = 1;
2063 else
2064 dev_data->use_vapic = 0;
2065 }
2066#endif
2067
2068 iommu_completion_wait(iommu);
2069
2070 return ret;
2071}
2072
2073static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
2074 unsigned long iova, size_t size)
2075{
2076 struct protection_domain *domain = to_pdomain(dom);
2077 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2078
2079 if (ops->map)
2080 domain_flush_np_cache(domain, iova, size);
2081}
2082
2083static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
2084 phys_addr_t paddr, size_t page_size, int iommu_prot,
2085 gfp_t gfp)
2086{
2087 struct protection_domain *domain = to_pdomain(dom);
2088 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2089 int prot = 0;
2090 int ret = -EINVAL;
2091
2092 if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
2093 (domain->iop.mode == PAGE_MODE_NONE))
2094 return -EINVAL;
2095
2096 if (iommu_prot & IOMMU_READ)
2097 prot |= IOMMU_PROT_IR;
2098 if (iommu_prot & IOMMU_WRITE)
2099 prot |= IOMMU_PROT_IW;
2100
2101 if (ops->map)
2102 ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
2103
2104 return ret;
2105}
2106
2107static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
2108 struct iommu_iotlb_gather *gather,
2109 unsigned long iova, size_t size)
2110{
	/*
	 * AMD's IOMMU can flush as many pages as necessary in a single flush,
	 * so on bare metal it is cheaper to over-flush a large range once
	 * than to issue multiple flushes. When the non-present cache is set
	 * we likely run behind a vIOMMU, where over-flushing is expensive for
	 * the hypervisor - in that case sync the gather immediately instead
	 * of merging disjoint ranges into one oversized flush.
	 */
2121 if (amd_iommu_np_cache &&
2122 iommu_iotlb_gather_is_disjoint(gather, iova, size))
2123 iommu_iotlb_sync(domain, gather);
2124
2125 iommu_iotlb_gather_add_range(gather, iova, size);
2126}
2127
2128static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
2129 size_t page_size,
2130 struct iommu_iotlb_gather *gather)
2131{
2132 struct protection_domain *domain = to_pdomain(dom);
2133 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2134 size_t r;
2135
2136 if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
2137 (domain->iop.mode == PAGE_MODE_NONE))
2138 return 0;
2139
2140 r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
2141
2142 amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
2143
2144 return r;
2145}
2146
2147static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
2148 dma_addr_t iova)
2149{
2150 struct protection_domain *domain = to_pdomain(dom);
2151 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2152
2153 return ops->iova_to_phys(ops, iova);
2154}
2155
2156static bool amd_iommu_capable(enum iommu_cap cap)
2157{
2158 switch (cap) {
2159 case IOMMU_CAP_CACHE_COHERENCY:
2160 return true;
2161 case IOMMU_CAP_INTR_REMAP:
2162 return (irq_remapping_enabled == 1);
2163 case IOMMU_CAP_NOEXEC:
2164 return false;
2165 default:
2166 break;
2167 }
2168
2169 return false;
2170}
2171
2172static void amd_iommu_get_resv_regions(struct device *dev,
2173 struct list_head *head)
2174{
2175 struct iommu_resv_region *region;
2176 struct unity_map_entry *entry;
2177 int devid;
2178
2179 devid = get_device_id(dev);
2180 if (devid < 0)
2181 return;
2182
2183 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
2184 int type, prot = 0;
2185 size_t length;
2186
2187 if (devid < entry->devid_start || devid > entry->devid_end)
2188 continue;
2189
2190 type = IOMMU_RESV_DIRECT;
2191 length = entry->address_end - entry->address_start;
2192 if (entry->prot & IOMMU_PROT_IR)
2193 prot |= IOMMU_READ;
2194 if (entry->prot & IOMMU_PROT_IW)
2195 prot |= IOMMU_WRITE;
2196 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
2197
2198 type = IOMMU_RESV_RESERVED;
2199
2200 region = iommu_alloc_resv_region(entry->address_start,
2201 length, prot, type);
2202 if (!region) {
2203 dev_err(dev, "Out of memory allocating dm-regions\n");
2204 return;
2205 }
2206 list_add_tail(®ion->list, head);
2207 }
2208
2209 region = iommu_alloc_resv_region(MSI_RANGE_START,
2210 MSI_RANGE_END - MSI_RANGE_START + 1,
2211 0, IOMMU_RESV_MSI);
2212 if (!region)
2213 return;
2214 list_add_tail(®ion->list, head);
2215
2216 region = iommu_alloc_resv_region(HT_RANGE_START,
2217 HT_RANGE_END - HT_RANGE_START + 1,
2218 0, IOMMU_RESV_RESERVED);
2219 if (!region)
2220 return;
2221 list_add_tail(®ion->list, head);
2222}
2223
2224bool amd_iommu_is_attach_deferred(struct device *dev)
2225{
2226 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2227
2228 return dev_data->defer_attach;
2229}
2230EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
2231
2232static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
2233{
2234 struct protection_domain *dom = to_pdomain(domain);
2235 unsigned long flags;
2236
2237 spin_lock_irqsave(&dom->lock, flags);
2238 amd_iommu_domain_flush_tlb_pde(dom);
2239 amd_iommu_domain_flush_complete(dom);
2240 spin_unlock_irqrestore(&dom->lock, flags);
2241}
2242
2243static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
2244 struct iommu_iotlb_gather *gather)
2245{
2246 struct protection_domain *dom = to_pdomain(domain);
2247 unsigned long flags;
2248
2249 spin_lock_irqsave(&dom->lock, flags);
2250 domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
2251 amd_iommu_domain_flush_complete(dom);
2252 spin_unlock_irqrestore(&dom->lock, flags);
2253}
2254
2255static int amd_iommu_def_domain_type(struct device *dev)
2256{
2257 struct iommu_dev_data *dev_data;
2258
2259 dev_data = dev_iommu_priv_get(dev);
2260 if (!dev_data)
2261 return 0;
2262
	/*
	 * Do not identity map IOMMUv2 capable devices when memory encryption
	 * is active, because some of those devices (AMD GPUs) don't have the
	 * encryption bit in their DMA-mask and require remapping.
	 */
2268 if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
2269 return IOMMU_DOMAIN_IDENTITY;
2270
2271 return 0;
2272}
2273
2274const struct iommu_ops amd_iommu_ops = {
2275 .capable = amd_iommu_capable,
2276 .domain_alloc = amd_iommu_domain_alloc,
2277 .probe_device = amd_iommu_probe_device,
2278 .release_device = amd_iommu_release_device,
2279 .probe_finalize = amd_iommu_probe_finalize,
2280 .device_group = amd_iommu_device_group,
2281 .get_resv_regions = amd_iommu_get_resv_regions,
2282 .put_resv_regions = generic_iommu_put_resv_regions,
2283 .is_attach_deferred = amd_iommu_is_attach_deferred,
2284 .pgsize_bitmap = AMD_IOMMU_PGSIZES,
2285 .def_domain_type = amd_iommu_def_domain_type,
2286 .default_domain_ops = &(const struct iommu_domain_ops) {
2287 .attach_dev = amd_iommu_attach_device,
2288 .detach_dev = amd_iommu_detach_device,
2289 .map = amd_iommu_map,
2290 .unmap = amd_iommu_unmap,
2291 .iotlb_sync_map = amd_iommu_iotlb_sync_map,
2292 .iova_to_phys = amd_iommu_iova_to_phys,
2293 .flush_iotlb_all = amd_iommu_flush_iotlb_all,
2294 .iotlb_sync = amd_iommu_iotlb_sync,
2295 .free = amd_iommu_domain_free,
2296 }
2297};
2298
/*****************************************************************************
 *
 * The following functions are a lower-level interface exported for the
 * IOMMUv2 (PASID) support: PPR notification, GCR3 table management and
 * per-PASID TLB flushing. They are not part of the generic IOMMU-API.
 *
 *****************************************************************************/
2310int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
2311{
2312 return atomic_notifier_chain_register(&ppr_notifier, nb);
2313}
2314EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
2315
2316int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
2317{
2318 return atomic_notifier_chain_unregister(&ppr_notifier, nb);
2319}
2320EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
2321
2322void amd_iommu_domain_direct_map(struct iommu_domain *dom)
2323{
2324 struct protection_domain *domain = to_pdomain(dom);
2325 unsigned long flags;
2326
2327 spin_lock_irqsave(&domain->lock, flags);
2328
2329 if (domain->iop.pgtbl_cfg.tlb)
2330 free_io_pgtable_ops(&domain->iop.iop.ops);
2331
2332 spin_unlock_irqrestore(&domain->lock, flags);
2333}
2334EXPORT_SYMBOL(amd_iommu_domain_direct_map);
2335
2336int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
2337{
2338 struct protection_domain *domain = to_pdomain(dom);
2339 unsigned long flags;
2340 int levels, ret;
2341
2342
2343 for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
2344 levels += 1;
2345
2346 if (levels > amd_iommu_max_glx_val)
2347 return -EINVAL;
2348
2349 spin_lock_irqsave(&domain->lock, flags);
2350
	/*
	 * The GCR3 table can only be set up on an empty domain which is not
	 * yet an IOMMUv2 domain.
	 */
2356 ret = -EBUSY;
2357 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
2358 goto out;
2359
2360 ret = -ENOMEM;
2361 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
2362 if (domain->gcr3_tbl == NULL)
2363 goto out;
2364
2365 domain->glx = levels;
2366 domain->flags |= PD_IOMMUV2_MASK;
2367
2368 amd_iommu_domain_update(domain);
2369
2370 ret = 0;
2371
2372out:
2373 spin_unlock_irqrestore(&domain->lock, flags);
2374
2375 return ret;
2376}
2377EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
2378
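/*
 * Flush the IOMMU TLB and all device TLBs for a single PASID of the given
 * domain, either for one address or for the whole address space.
 */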
2379static int __flush_pasid(struct protection_domain *domain, u32 pasid,
2380 u64 address, bool size)
2381{
2382 struct iommu_dev_data *dev_data;
2383 struct iommu_cmd cmd;
2384 int i, ret;
2385
2386 if (!(domain->flags & PD_IOMMUV2_MASK))
2387 return -EINVAL;
2388
2389 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
2390
	/*
	 * The IOMMU TLB needs to be flushed before the device TLBs to
	 * prevent device TLB refills from the IOMMU TLB.
	 */
2395 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
2396 if (domain->dev_iommu[i] == 0)
2397 continue;
2398
2399 ret = iommu_queue_command(amd_iommus[i], &cmd);
2400 if (ret != 0)
2401 goto out;
2402 }
2403
2404
2405 amd_iommu_domain_flush_complete(domain);
2406
2407
2408 list_for_each_entry(dev_data, &domain->dev_list, list) {
2409 struct amd_iommu *iommu;
2410 int qdep;
2411
		/*
		 * Devices without ATS enabled have no IOTLB that could be
		 * flushed - skip them.
		 */
2416 if (!dev_data->ats.enabled)
2417 continue;
2418
2419 qdep = dev_data->ats.qdep;
2420 iommu = amd_iommu_rlookup_table[dev_data->devid];
2421
2422 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
2423 qdep, address, size);
2424
2425 ret = iommu_queue_command(iommu, &cmd);
2426 if (ret != 0)
2427 goto out;
2428 }
2429
2430
2431 amd_iommu_domain_flush_complete(domain);
2432
2433 ret = 0;
2434
2435out:
2436
2437 return ret;
2438}
2439
2440static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
2441 u64 address)
2442{
2443 return __flush_pasid(domain, pasid, address, false);
2444}
2445
2446int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
2447 u64 address)
2448{
2449 struct protection_domain *domain = to_pdomain(dom);
2450 unsigned long flags;
2451 int ret;
2452
2453 spin_lock_irqsave(&domain->lock, flags);
2454 ret = __amd_iommu_flush_page(domain, pasid, address);
2455 spin_unlock_irqrestore(&domain->lock, flags);
2456
2457 return ret;
2458}
2459EXPORT_SYMBOL(amd_iommu_flush_page);
2460
2461static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
2462{
2463 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
2464 true);
2465}
2466
2467int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
2468{
2469 struct protection_domain *domain = to_pdomain(dom);
2470 unsigned long flags;
2471 int ret;
2472
2473 spin_lock_irqsave(&domain->lock, flags);
2474 ret = __amd_iommu_flush_tlb(domain, pasid);
2475 spin_unlock_irqrestore(&domain->lock, flags);
2476
2477 return ret;
2478}
2479EXPORT_SYMBOL(amd_iommu_flush_tlb);
2480
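/*
 * Walk the GCR3 table for @pasid: each level resolves 9 bits of the PASID
 * (512 entries per 4K table page). Missing intermediate levels are allocated
 * when @alloc is true. Returns a pointer to the level-0 entry, or NULL if
 * the walk cannot be completed.
 */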
2481static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
2482{
2483 int index;
2484 u64 *pte;
2485
2486 while (true) {
2487
2488 index = (pasid >> (9 * level)) & 0x1ff;
2489 pte = &root[index];
2490
2491 if (level == 0)
2492 break;
2493
2494 if (!(*pte & GCR3_VALID)) {
2495 if (!alloc)
2496 return NULL;
2497
2498 root = (void *)get_zeroed_page(GFP_ATOMIC);
2499 if (root == NULL)
2500 return NULL;
2501
2502 *pte = iommu_virt_to_phys(root) | GCR3_VALID;
2503 }
2504
2505 root = iommu_phys_to_virt(*pte & PAGE_MASK);
2506
2507 level -= 1;
2508 }
2509
2510 return pte;
2511}
2512
2513static int __set_gcr3(struct protection_domain *domain, u32 pasid,
2514 unsigned long cr3)
2515{
2516 u64 *pte;
2517
2518 if (domain->iop.mode != PAGE_MODE_NONE)
2519 return -EINVAL;
2520
2521 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
2522 if (pte == NULL)
2523 return -ENOMEM;
2524
2525 *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
2526
2527 return __amd_iommu_flush_tlb(domain, pasid);
2528}
2529
2530static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
2531{
2532 u64 *pte;
2533
2534 if (domain->iop.mode != PAGE_MODE_NONE)
2535 return -EINVAL;
2536
2537 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
2538 if (pte == NULL)
2539 return 0;
2540
2541 *pte = 0;
2542
2543 return __amd_iommu_flush_tlb(domain, pasid);
2544}
2545
2546int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
2547 unsigned long cr3)
2548{
2549 struct protection_domain *domain = to_pdomain(dom);
2550 unsigned long flags;
2551 int ret;
2552
2553 spin_lock_irqsave(&domain->lock, flags);
2554 ret = __set_gcr3(domain, pasid, cr3);
2555 spin_unlock_irqrestore(&domain->lock, flags);
2556
2557 return ret;
2558}
2559EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
2560
2561int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
2562{
2563 struct protection_domain *domain = to_pdomain(dom);
2564 unsigned long flags;
2565 int ret;
2566
2567 spin_lock_irqsave(&domain->lock, flags);
2568 ret = __clear_gcr3(domain, pasid);
2569 spin_unlock_irqrestore(&domain->lock, flags);
2570
2571 return ret;
2572}
2573EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
2574
2575int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
2576 int status, int tag)
2577{
2578 struct iommu_dev_data *dev_data;
2579 struct amd_iommu *iommu;
2580 struct iommu_cmd cmd;
2581
2582 dev_data = dev_iommu_priv_get(&pdev->dev);
2583 iommu = amd_iommu_rlookup_table[dev_data->devid];
2584
2585 build_complete_ppr(&cmd, dev_data->devid, pasid, status,
2586 tag, dev_data->pri_tlp);
2587
2588 return iommu_queue_command(iommu, &cmd);
2589}
2590EXPORT_SYMBOL(amd_iommu_complete_ppr);
2591
2592int amd_iommu_device_info(struct pci_dev *pdev,
2593 struct amd_iommu_device_info *info)
2594{
2595 int max_pasids;
2596 int pos;
2597
2598 if (pdev == NULL || info == NULL)
2599 return -EINVAL;
2600
2601 if (!amd_iommu_v2_supported())
2602 return -EINVAL;
2603
2604 memset(info, 0, sizeof(*info));
2605
2606 if (pci_ats_supported(pdev))
2607 info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
2608
2609 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2610 if (pos)
2611 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
2612
2613 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
2614 if (pos) {
2615 int features;
2616
2617 max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
2618 max_pasids = min(max_pasids, (1 << 20));
2619
2620 info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
2621 info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
2622
2623 features = pci_pasid_features(pdev);
2624 if (features & PCI_PASID_CAP_EXEC)
2625 info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
2626 if (features & PCI_PASID_CAP_PRIV)
2627 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
2628 }
2629
2630 return 0;
2631}
2632EXPORT_SYMBOL(amd_iommu_device_info);
2633
2634#ifdef CONFIG_IRQ_REMAP
2635
/*****************************************************************************
 *
 * Interrupt Remapping Implementation
 *
 *****************************************************************************/
2641
2642static struct irq_chip amd_ir_chip;
2643static DEFINE_SPINLOCK(iommu_table_lock);
2644
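/*
 * Program the interrupt-table pointer in the device table entry (DTE) for
 * @devid and enable interrupt remapping for that device.
 */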
2645static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
2646{
2647 u64 dte;
2648
2649 dte = amd_iommu_dev_table[devid].data[2];
2650 dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
2651 dte |= iommu_virt_to_phys(table->table);
2652 dte |= DTE_IRQ_REMAP_INTCTL;
2653 dte |= DTE_INTTABLEN;
2654 dte |= DTE_IRQ_REMAP_ENABLE;
2655
2656 amd_iommu_dev_table[devid].data[2] = dte;
2657}
2658
2659static struct irq_remap_table *get_irq_table(u16 devid)
2660{
2661 struct irq_remap_table *table;
2662
2663 if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
2664 "%s: no iommu for devid %x\n", __func__, devid))
2665 return NULL;
2666
2667 table = irq_lookup_table[devid];
2668 if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
2669 return NULL;
2670
2671 return table;
2672}
2673
2674static struct irq_remap_table *__alloc_irq_table(void)
2675{
2676 struct irq_remap_table *table;
2677
2678 table = kzalloc(sizeof(*table), GFP_KERNEL);
2679 if (!table)
2680 return NULL;
2681
2682 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
2683 if (!table->table) {
2684 kfree(table);
2685 return NULL;
2686 }
2687 raw_spin_lock_init(&table->lock);
2688
2689 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2690 memset(table->table, 0,
2691 MAX_IRQS_PER_TABLE * sizeof(u32));
2692 else
2693 memset(table->table, 0,
2694 (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
2695 return table;
2696}
2697
2698static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
2699 struct irq_remap_table *table)
2700{
2701 irq_lookup_table[devid] = table;
2702 set_dte_irq_entry(devid, table);
2703 iommu_flush_dte(iommu, devid);
2704}
2705
2706static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
2707 void *data)
2708{
2709 struct irq_remap_table *table = data;
2710
2711 irq_lookup_table[alias] = table;
2712 set_dte_irq_entry(alias, table);
2713
2714 iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
2715
2716 return 0;
2717}
2718
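/*
 * Find or create the interrupt remapping table for @devid. The table lock
 * has to be dropped around the allocation, so the lookup is redone afterwards
 * and a freshly allocated table that lost the race is freed again before
 * returning. DMA aliases share the same table.
 */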
2719static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
2720{
2721 struct irq_remap_table *table = NULL;
2722 struct irq_remap_table *new_table = NULL;
2723 struct amd_iommu *iommu;
2724 unsigned long flags;
2725 u16 alias;
2726
2727 spin_lock_irqsave(&iommu_table_lock, flags);
2728
2729 iommu = amd_iommu_rlookup_table[devid];
2730 if (!iommu)
2731 goto out_unlock;
2732
2733 table = irq_lookup_table[devid];
2734 if (table)
2735 goto out_unlock;
2736
2737 alias = amd_iommu_alias_table[devid];
2738 table = irq_lookup_table[alias];
2739 if (table) {
2740 set_remap_table_entry(iommu, devid, table);
2741 goto out_wait;
2742 }
2743 spin_unlock_irqrestore(&iommu_table_lock, flags);
2744
 /* Nothing there yet, allocate new irq remapping table */
2746 new_table = __alloc_irq_table();
2747 if (!new_table)
2748 return NULL;
2749
2750 spin_lock_irqsave(&iommu_table_lock, flags);
2751
2752 table = irq_lookup_table[devid];
2753 if (table)
2754 goto out_unlock;
2755
2756 table = irq_lookup_table[alias];
2757 if (table) {
2758 set_remap_table_entry(iommu, devid, table);
2759 goto out_wait;
2760 }
2761
2762 table = new_table;
2763 new_table = NULL;
2764
2765 if (pdev)
2766 pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
2767 table);
2768 else
2769 set_remap_table_entry(iommu, devid, table);
2770
2771 if (devid != alias)
2772 set_remap_table_entry(iommu, alias, table);
2773
2774out_wait:
2775 iommu_completion_wait(iommu);
2776
2777out_unlock:
2778 spin_unlock_irqrestore(&iommu_table_lock, flags);
2779
2780 if (new_table) {
2781 kmem_cache_free(amd_iommu_irq_cache, new_table->table);
2782 kfree(new_table);
2783 }
2784 return table;
2785}
2786
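/*
 * Reserve @count consecutive free entries in the device's remapping table
 * (power-of-two aligned if @align is set). Returns the first reserved index,
 * or a negative error code if there is no IOMMU, no table, or no sufficiently
 * large run of free entries.
 */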
2787static int alloc_irq_index(u16 devid, int count, bool align,
2788 struct pci_dev *pdev)
2789{
2790 struct irq_remap_table *table;
2791 int index, c, alignment = 1;
2792 unsigned long flags;
2793 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
2794
2795 if (!iommu)
2796 return -ENODEV;
2797
2798 table = alloc_irq_table(devid, pdev);
2799 if (!table)
2800 return -ENODEV;
2801
2802 if (align)
2803 alignment = roundup_pow_of_two(count);
2804
2805 raw_spin_lock_irqsave(&table->lock, flags);
2806
 /* Scan table for free entries */
2808 for (index = ALIGN(table->min_index, alignment), c = 0;
2809 index < MAX_IRQS_PER_TABLE;) {
2810 if (!iommu->irte_ops->is_allocated(table, index)) {
2811 c += 1;
2812 } else {
2813 c = 0;
2814 index = ALIGN(index + 1, alignment);
2815 continue;
2816 }
2817
2818 if (c == count) {
2819 for (; c != 0; --c)
2820 iommu->irte_ops->set_allocated(table, index - c + 1);
2821
2822 index -= count - 1;
2823 goto out;
2824 }
2825
2826 index++;
2827 }
2828
2829 index = -ENOSPC;
2830
2831out:
2832 raw_spin_unlock_irqrestore(&table->lock, flags);
2833
2834 return index;
2835}
2836
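/*
 * Update a 128-bit (GA format) IRTE in place: both 64-bit halves are swapped
 * atomically with cmpxchg_double() under the table lock, and the interrupt
 * remapping table is then flushed on the owning IOMMU.
 */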
2837static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
2838 struct amd_ir_data *data)
2839{
2840 bool ret;
2841 struct irq_remap_table *table;
2842 struct amd_iommu *iommu;
2843 unsigned long flags;
2844 struct irte_ga *entry;
2845
2846 iommu = amd_iommu_rlookup_table[devid];
2847 if (iommu == NULL)
2848 return -EINVAL;
2849
2850 table = get_irq_table(devid);
2851 if (!table)
2852 return -ENOMEM;
2853
2854 raw_spin_lock_irqsave(&table->lock, flags);
2855
2856 entry = (struct irte_ga *)table->table;
2857 entry = &entry[index];
2858
2859 ret = cmpxchg_double(&entry->lo.val, &entry->hi.val,
2860 entry->lo.val, entry->hi.val,
2861 irte->lo.val, irte->hi.val);
2862
 /*
  * We use cmpxchg16 to atomically update the 128-bit IRTE,
  * and it cannot be updated by the hardware or other processors
  * behind us, so the return value of cmpxchg16 should be the
  * same as the old value.
  */
2868 WARN_ON(!ret);
2869
2870 if (data)
2871 data->ref = entry;
2872
2873 raw_spin_unlock_irqrestore(&table->lock, flags);
2874
2875 iommu_flush_irt(iommu, devid);
2876 iommu_completion_wait(iommu);
2877
2878 return 0;
2879}
2880
2881static int modify_irte(u16 devid, int index, union irte *irte)
2882{
2883 struct irq_remap_table *table;
2884 struct amd_iommu *iommu;
2885 unsigned long flags;
2886
2887 iommu = amd_iommu_rlookup_table[devid];
2888 if (iommu == NULL)
2889 return -EINVAL;
2890
2891 table = get_irq_table(devid);
2892 if (!table)
2893 return -ENOMEM;
2894
2895 raw_spin_lock_irqsave(&table->lock, flags);
2896 table->table[index] = irte->val;
2897 raw_spin_unlock_irqrestore(&table->lock, flags);
2898
2899 iommu_flush_irt(iommu, devid);
2900 iommu_completion_wait(iommu);
2901
2902 return 0;
2903}
2904
2905static void free_irte(u16 devid, int index)
2906{
2907 struct irq_remap_table *table;
2908 struct amd_iommu *iommu;
2909 unsigned long flags;
2910
2911 iommu = amd_iommu_rlookup_table[devid];
2912 if (iommu == NULL)
2913 return;
2914
2915 table = get_irq_table(devid);
2916 if (!table)
2917 return;
2918
2919 raw_spin_lock_irqsave(&table->lock, flags);
2920 iommu->irte_ops->clear_allocated(table, index);
2921 raw_spin_unlock_irqrestore(&table->lock, flags);
2922
2923 iommu_flush_irt(iommu, devid);
2924 iommu_completion_wait(iommu);
2925}
2926
2927static void irte_prepare(void *entry,
2928 u32 delivery_mode, bool dest_mode,
2929 u8 vector, u32 dest_apicid, int devid)
2930{
2931 union irte *irte = (union irte *) entry;
2932
2933 irte->val = 0;
2934 irte->fields.vector = vector;
2935 irte->fields.int_type = delivery_mode;
2936 irte->fields.destination = dest_apicid;
2937 irte->fields.dm = dest_mode;
2938 irte->fields.valid = 1;
2939}
2940
2941static void irte_ga_prepare(void *entry,
2942 u32 delivery_mode, bool dest_mode,
2943 u8 vector, u32 dest_apicid, int devid)
2944{
2945 struct irte_ga *irte = (struct irte_ga *) entry;
2946
2947 irte->lo.val = 0;
2948 irte->hi.val = 0;
2949 irte->lo.fields_remap.int_type = delivery_mode;
2950 irte->lo.fields_remap.dm = dest_mode;
2951 irte->hi.fields.vector = vector;
2952 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
2953 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid);
2954 irte->lo.fields_remap.valid = 1;
2955}
2956
2957static void irte_activate(void *entry, u16 devid, u16 index)
2958{
2959 union irte *irte = (union irte *) entry;
2960
2961 irte->fields.valid = 1;
2962 modify_irte(devid, index, irte);
2963}
2964
2965static void irte_ga_activate(void *entry, u16 devid, u16 index)
2966{
2967 struct irte_ga *irte = (struct irte_ga *) entry;
2968
2969 irte->lo.fields_remap.valid = 1;
2970 modify_irte_ga(devid, index, irte, NULL);
2971}
2972
2973static void irte_deactivate(void *entry, u16 devid, u16 index)
2974{
2975 union irte *irte = (union irte *) entry;
2976
2977 irte->fields.valid = 0;
2978 modify_irte(devid, index, irte);
2979}
2980
2981static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
2982{
2983 struct irte_ga *irte = (struct irte_ga *) entry;
2984
2985 irte->lo.fields_remap.valid = 0;
2986 modify_irte_ga(devid, index, irte, NULL);
2987}
2988
2989static void irte_set_affinity(void *entry, u16 devid, u16 index,
2990 u8 vector, u32 dest_apicid)
2991{
2992 union irte *irte = (union irte *) entry;
2993
2994 irte->fields.vector = vector;
2995 irte->fields.destination = dest_apicid;
2996 modify_irte(devid, index, irte);
2997}
2998
2999static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
3000 u8 vector, u32 dest_apicid)
3001{
3002 struct irte_ga *irte = (struct irte_ga *) entry;
3003
3004 if (!irte->lo.fields_remap.guest_mode) {
3005 irte->hi.fields.vector = vector;
3006 irte->lo.fields_remap.destination =
3007 APICID_TO_IRTE_DEST_LO(dest_apicid);
3008 irte->hi.fields.destination =
3009 APICID_TO_IRTE_DEST_HI(dest_apicid);
3010 modify_irte_ga(devid, index, irte, NULL);
3011 }
3012}
3013
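/*
 * Value used to mark a 32-bit IRTE slot as reserved: non-zero, so that
 * irte_is_allocated() sees it as taken, but with the low (valid) bit clear
 * so the hardware never treats it as a live remapping entry.
 */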
3014#define IRTE_ALLOCATED (~1U)
3015static void irte_set_allocated(struct irq_remap_table *table, int index)
3016{
3017 table->table[index] = IRTE_ALLOCATED;
3018}
3019
3020static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3021{
3022 struct irte_ga *ptr = (struct irte_ga *)table->table;
3023 struct irte_ga *irte = &ptr[index];
3024
3025 memset(&irte->lo.val, 0, sizeof(u64));
3026 memset(&irte->hi.val, 0, sizeof(u64));
3027 irte->hi.fields.vector = 0xff;
3028}
3029
3030static bool irte_is_allocated(struct irq_remap_table *table, int index)
3031{
3032 union irte *ptr = (union irte *)table->table;
3033 union irte *irte = &ptr[index];
3034
3035 return irte->val != 0;
3036}
3037
3038static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3039{
3040 struct irte_ga *ptr = (struct irte_ga *)table->table;
3041 struct irte_ga *irte = &ptr[index];
3042
3043 return irte->hi.fields.vector != 0;
3044}
3045
3046static void irte_clear_allocated(struct irq_remap_table *table, int index)
3047{
3048 table->table[index] = 0;
3049}
3050
3051static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3052{
3053 struct irte_ga *ptr = (struct irte_ga *)table->table;
3054 struct irte_ga *irte = &ptr[index];
3055
3056 memset(&irte->lo.val, 0, sizeof(u64));
3057 memset(&irte->hi.val, 0, sizeof(u64));
3058}
3059
3060static int get_devid(struct irq_alloc_info *info)
3061{
3062 switch (info->type) {
3063 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3064 return get_ioapic_devid(info->devid);
3065 case X86_IRQ_ALLOC_TYPE_HPET:
3066 return get_hpet_devid(info->devid);
3067 case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3068 case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3069 return get_device_id(msi_desc_to_dev(info->desc));
3070 default:
3071 WARN_ON_ONCE(1);
3072 return -1;
3073 }
3074}
3075
3076struct irq_remap_ops amd_iommu_irq_ops = {
3077 .prepare = amd_iommu_prepare,
3078 .enable = amd_iommu_enable,
3079 .disable = amd_iommu_disable,
3080 .reenable = amd_iommu_reenable,
3081 .enable_faulting = amd_iommu_enable_faulting,
3082};
3083
3084static void fill_msi_msg(struct msi_msg *msg, u32 index)
3085{
3086 msg->data = index;
3087 msg->address_lo = 0;
3088 msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
3089 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
3090}
3091
3092static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3093 struct irq_cfg *irq_cfg,
3094 struct irq_alloc_info *info,
3095 int devid, int index, int sub_handle)
3096{
3097 struct irq_2_irte *irte_info = &data->irq_2_irte;
3098 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3099
3100 if (!iommu)
3101 return;
3102
3103 data->irq_2_irte.devid = devid;
3104 data->irq_2_irte.index = index + sub_handle;
3105 iommu->irte_ops->prepare(data->entry, apic->delivery_mode,
3106 apic->dest_mode_logical, irq_cfg->vector,
3107 irq_cfg->dest_apicid, devid);
3108
3109 switch (info->type) {
3110 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3111 case X86_IRQ_ALLOC_TYPE_HPET:
3112 case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3113 case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3114 fill_msi_msg(&data->msi_entry, irte_info->index);
3115 break;
3116
3117 default:
3118 BUG_ON(1);
3119 break;
3120 }
3121}
3122
3123struct amd_irte_ops irte_32_ops = {
3124 .prepare = irte_prepare,
3125 .activate = irte_activate,
3126 .deactivate = irte_deactivate,
3127 .set_affinity = irte_set_affinity,
3128 .set_allocated = irte_set_allocated,
3129 .is_allocated = irte_is_allocated,
3130 .clear_allocated = irte_clear_allocated,
3131};
3132
3133struct amd_irte_ops irte_128_ops = {
3134 .prepare = irte_ga_prepare,
3135 .activate = irte_ga_activate,
3136 .deactivate = irte_ga_deactivate,
3137 .set_affinity = irte_ga_set_affinity,
3138 .set_allocated = irte_ga_set_allocated,
3139 .is_allocated = irte_ga_is_allocated,
3140 .clear_allocated = irte_ga_clear_allocated,
3141};
3142
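/*
 * irq_domain .alloc callback: allocate the parent interrupts, reserve IRTE
 * slots for the device (IOAPIC pins map onto the fixed first 32 table
 * indexes), and attach an amd_ir_data with a prepared IRTE to every
 * allocated interrupt.
 */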
3143static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3144 unsigned int nr_irqs, void *arg)
3145{
3146 struct irq_alloc_info *info = arg;
3147 struct irq_data *irq_data;
3148 struct amd_ir_data *data = NULL;
3149 struct irq_cfg *cfg;
3150 int i, ret, devid;
3151 int index;
3152
3153 if (!info)
3154 return -EINVAL;
3155 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
3156 info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
3157 return -EINVAL;
3158
 /*
  * With IRQ remapping enabled, don't need contiguous CPU vectors
  * to support multiple MSI interrupts.
  */
3163 if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
3164 info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
3165
3166 devid = get_devid(info);
3167 if (devid < 0)
3168 return -EINVAL;
3169
3170 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
3171 if (ret < 0)
3172 return ret;
3173
3174 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
3175 struct irq_remap_table *table;
3176 struct amd_iommu *iommu;
3177
3178 table = alloc_irq_table(devid, NULL);
3179 if (table) {
3180 if (!table->min_index) {
 /*
  * Keep the first 32 indexes free for IOAPIC
  * interrupts.
  */
3185 table->min_index = 32;
3186 iommu = amd_iommu_rlookup_table[devid];
3187 for (i = 0; i < 32; ++i)
3188 iommu->irte_ops->set_allocated(table, i);
3189 }
3190 WARN_ON(table->min_index != 32);
3191 index = info->ioapic.pin;
3192 } else {
3193 index = -ENOMEM;
3194 }
3195 } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
3196 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
3197 bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
3198
3199 index = alloc_irq_index(devid, nr_irqs, align,
3200 msi_desc_to_pci_dev(info->desc));
3201 } else {
3202 index = alloc_irq_index(devid, nr_irqs, false, NULL);
3203 }
3204
3205 if (index < 0) {
3206 pr_warn("Failed to allocate IRTE\n");
3207 ret = index;
3208 goto out_free_parent;
3209 }
3210
3211 for (i = 0; i < nr_irqs; i++) {
3212 irq_data = irq_domain_get_irq_data(domain, virq + i);
3213 cfg = irq_data ? irqd_cfg(irq_data) : NULL;
3214 if (!cfg) {
3215 ret = -EINVAL;
3216 goto out_free_data;
3217 }
3218
3219 ret = -ENOMEM;
3220 data = kzalloc(sizeof(*data), GFP_KERNEL);
3221 if (!data)
3222 goto out_free_data;
3223
3224 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3225 data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
3226 else
3227 data->entry = kzalloc(sizeof(struct irte_ga),
3228 GFP_KERNEL);
3229 if (!data->entry) {
3230 kfree(data);
3231 goto out_free_data;
3232 }
3233
3234 irq_data->hwirq = (devid << 16) + i;
3235 irq_data->chip_data = data;
3236 irq_data->chip = &amd_ir_chip;
3237 irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
3238 irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
3239 }
3240
3241 return 0;
3242
3243out_free_data:
3244 for (i--; i >= 0; i--) {
3245 irq_data = irq_domain_get_irq_data(domain, virq + i);
3246 if (irq_data)
3247 kfree(irq_data->chip_data);
3248 }
3249 for (i = 0; i < nr_irqs; i++)
3250 free_irte(devid, index + i);
3251out_free_parent:
3252 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3253 return ret;
3254}
3255
3256static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
3257 unsigned int nr_irqs)
3258{
3259 struct irq_2_irte *irte_info;
3260 struct irq_data *irq_data;
3261 struct amd_ir_data *data;
3262 int i;
3263
3264 for (i = 0; i < nr_irqs; i++) {
3265 irq_data = irq_domain_get_irq_data(domain, virq + i);
3266 if (irq_data && irq_data->chip_data) {
3267 data = irq_data->chip_data;
3268 irte_info = &data->irq_2_irte;
3269 free_irte(irte_info->devid, irte_info->index);
3270 kfree(data->entry);
3271 kfree(data);
3272 }
3273 }
3274 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3275}
3276
3277static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3278 struct amd_ir_data *ir_data,
3279 struct irq_2_irte *irte_info,
3280 struct irq_cfg *cfg);
3281
3282static int irq_remapping_activate(struct irq_domain *domain,
3283 struct irq_data *irq_data, bool reserve)
3284{
3285 struct amd_ir_data *data = irq_data->chip_data;
3286 struct irq_2_irte *irte_info = &data->irq_2_irte;
3287 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
3288 struct irq_cfg *cfg = irqd_cfg(irq_data);
3289
3290 if (!iommu)
3291 return 0;
3292
3293 iommu->irte_ops->activate(data->entry, irte_info->devid,
3294 irte_info->index);
3295 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
3296 return 0;
3297}
3298
3299static void irq_remapping_deactivate(struct irq_domain *domain,
3300 struct irq_data *irq_data)
3301{
3302 struct amd_ir_data *data = irq_data->chip_data;
3303 struct irq_2_irte *irte_info = &data->irq_2_irte;
3304 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
3305
3306 if (iommu)
3307 iommu->irte_ops->deactivate(data->entry, irte_info->devid,
3308 irte_info->index);
3309}
3310
3311static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
3312 enum irq_domain_bus_token bus_token)
3313{
3314 struct amd_iommu *iommu;
3315 int devid = -1;
3316
3317 if (!amd_iommu_irq_remap)
3318 return 0;
3319
3320 if (x86_fwspec_is_ioapic(fwspec))
3321 devid = get_ioapic_devid(fwspec->param[0]);
3322 else if (x86_fwspec_is_hpet(fwspec))
3323 devid = get_hpet_devid(fwspec->param[0]);
3324
3325 if (devid < 0)
3326 return 0;
3327
3328 iommu = amd_iommu_rlookup_table[devid];
3329 return iommu && iommu->ir_domain == d;
3330}
3331
3332static const struct irq_domain_ops amd_ir_domain_ops = {
3333 .select = irq_remapping_select,
3334 .alloc = irq_remapping_alloc,
3335 .free = irq_remapping_free,
3336 .activate = irq_remapping_activate,
3337 .deactivate = irq_remapping_deactivate,
3338};
3339
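/*
 * Reprogram an IRTE into guest (vAPIC) mode so the interrupt is posted to
 * the guest: the remapped fields are replaced with the guest root pointer,
 * vector and GA tag carried in amd_ir_data. No-op if GA mode is disabled or
 * the entry is already in guest mode.
 */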
3340int amd_iommu_activate_guest_mode(void *data)
3341{
3342 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3343 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3344 u64 valid;
3345
3346 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3347 !entry || entry->lo.fields_vapic.guest_mode)
3348 return 0;
3349
3350 valid = entry->lo.fields_vapic.valid;
3351
3352 entry->lo.val = 0;
3353 entry->hi.val = 0;
3354
3355 entry->lo.fields_vapic.valid = valid;
3356 entry->lo.fields_vapic.guest_mode = 1;
3357 entry->lo.fields_vapic.ga_log_intr = 1;
3358 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
3359 entry->hi.fields.vector = ir_data->ga_vector;
3360 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
3361
3362 return modify_irte_ga(ir_data->irq_2_irte.devid,
3363 ir_data->irq_2_irte.index, entry, ir_data);
3364}
3365EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
3366
3367int amd_iommu_deactivate_guest_mode(void *data)
3368{
3369 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3370 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3371 struct irq_cfg *cfg = ir_data->cfg;
3372 u64 valid;
3373
3374 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3375 !entry || !entry->lo.fields_vapic.guest_mode)
3376 return 0;
3377
3378 valid = entry->lo.fields_remap.valid;
3379
3380 entry->lo.val = 0;
3381 entry->hi.val = 0;
3382
3383 entry->lo.fields_remap.valid = valid;
3384 entry->lo.fields_remap.dm = apic->dest_mode_logical;
3385 entry->lo.fields_remap.int_type = apic->delivery_mode;
3386 entry->hi.fields.vector = cfg->vector;
3387 entry->lo.fields_remap.destination =
3388 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
3389 entry->hi.fields.destination =
3390 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
3391
3392 return modify_irte_ga(ir_data->irq_2_irte.devid,
3393 ir_data->irq_2_irte.index, entry, ir_data);
3394}
3395EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
3396
3397static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
3398{
3399 int ret;
3400 struct amd_iommu *iommu;
3401 struct amd_iommu_pi_data *pi_data = vcpu_info;
3402 struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
3403 struct amd_ir_data *ir_data = data->chip_data;
3404 struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
3405 struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
3406
 /*
  * Note: this device has never been set up for guest mode,
  * so we should not modify the IRTE.
  */
3411 if (!dev_data || !dev_data->use_vapic)
3412 return 0;
3413
3414 ir_data->cfg = irqd_cfg(data);
3415 pi_data->ir_data = ir_data;
3416
 /*
  * Note: SVM tries to set up for VAPIC mode, but we are in
  * legacy mode. So, we force legacy mode instead.
  */
3421 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
3422 pr_debug("%s: Fall back to using intr legacy remap\n",
3423 __func__);
3424 pi_data->is_guest_mode = false;
3425 }
3426
3427 iommu = amd_iommu_rlookup_table[irte_info->devid];
3428 if (iommu == NULL)
3429 return -EINVAL;
3430
3431 pi_data->prev_ga_tag = ir_data->cached_ga_tag;
3432 if (pi_data->is_guest_mode) {
3433 ir_data->ga_root_ptr = (pi_data->base >> 12);
3434 ir_data->ga_vector = vcpu_pi_info->vector;
3435 ir_data->ga_tag = pi_data->ga_tag;
3436 ret = amd_iommu_activate_guest_mode(ir_data);
3437 if (!ret)
3438 ir_data->cached_ga_tag = pi_data->ga_tag;
3439 } else {
3440 ret = amd_iommu_deactivate_guest_mode(ir_data);
3441
 /*
  * This communicates the ga_tag back to the caller
  * so that it can do all the necessary clean up.
  */
3446 if (!ret)
3447 ir_data->cached_ga_tag = 0;
3448 }
3449
3450 return ret;
3451}
3452
3453
3454static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3455 struct amd_ir_data *ir_data,
3456 struct irq_2_irte *irte_info,
3457 struct irq_cfg *cfg)
3458{
 /*
  * Atomically updates the IRTE with the new destination, vector
  * and flushes the interrupt entry cache.
  */
3464 iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
3465 irte_info->index, cfg->vector,
3466 cfg->dest_apicid);
3467}
3468
3469static int amd_ir_set_affinity(struct irq_data *data,
3470 const struct cpumask *mask, bool force)
3471{
3472 struct amd_ir_data *ir_data = data->chip_data;
3473 struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
3474 struct irq_cfg *cfg = irqd_cfg(data);
3475 struct irq_data *parent = data->parent_data;
3476 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
3477 int ret;
3478
3479 if (!iommu)
3480 return -ENODEV;
3481
3482 ret = parent->chip->irq_set_affinity(parent, mask, force);
3483 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
3484 return ret;
3485
3486 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
3487
 /*
  * After this point, all the interrupts will start arriving
  * at the new destination. So, time to cleanup the previous
  * vector allocation.
  */
3492 send_cleanup_vector(cfg);
3493
3494 return IRQ_SET_MASK_OK_DONE;
3495}
3496
3497static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
3498{
3499 struct amd_ir_data *ir_data = irq_data->chip_data;
3500
3501 *msg = ir_data->msi_entry;
3502}
3503
3504static struct irq_chip amd_ir_chip = {
3505 .name = "AMD-IR",
3506 .irq_ack = apic_ack_irq,
3507 .irq_set_affinity = amd_ir_set_affinity,
3508 .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
3509 .irq_compose_msi_msg = ir_compose_msi_msg,
3510};
3511
3512int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
3513{
3514 struct fwnode_handle *fn;
3515
3516 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
3517 if (!fn)
3518 return -ENOMEM;
3519 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
3520 if (!iommu->ir_domain) {
3521 irq_domain_free_fwnode(fn);
3522 return -ENOMEM;
3523 }
3524
3525 iommu->ir_domain->parent = arch_get_ir_parent_domain();
3526 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
3527 "AMD-IR-MSI",
3528 iommu->index);
3529 return 0;
3530}
3531
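/*
 * Update the destination and running hint of a guest-mode IRTE, typically
 * when the vCPU backing it migrates to another physical CPU or changes its
 * running state. The update is done under the table lock and is followed by
 * an interrupt-table flush on the owning IOMMU.
 */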
3532int amd_iommu_update_ga(int cpu, bool is_run, void *data)
3533{
3534 unsigned long flags;
3535 struct amd_iommu *iommu;
3536 struct irq_remap_table *table;
3537 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3538 int devid = ir_data->irq_2_irte.devid;
3539 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3540 struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
3541
3542 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3543 !ref || !entry || !entry->lo.fields_vapic.guest_mode)
3544 return 0;
3545
3546 iommu = amd_iommu_rlookup_table[devid];
3547 if (!iommu)
3548 return -ENODEV;
3549
3550 table = get_irq_table(devid);
3551 if (!table)
3552 return -ENODEV;
3553
3554 raw_spin_lock_irqsave(&table->lock, flags);
3555
3556 if (ref->lo.fields_vapic.guest_mode) {
3557 if (cpu >= 0) {
3558 ref->lo.fields_vapic.destination =
3559 APICID_TO_IRTE_DEST_LO(cpu);
3560 ref->hi.fields.destination =
3561 APICID_TO_IRTE_DEST_HI(cpu);
3562 }
3563 ref->lo.fields_vapic.is_run = is_run;
3564 barrier();
3565 }
3566
3567 raw_spin_unlock_irqrestore(&table->lock, flags);
3568
3569 iommu_flush_irt(iommu, devid);
3570 iommu_completion_wait(iommu);
3571 return 0;
3572}
3573EXPORT_SYMBOL(amd_iommu_update_ga);
3574#endif
3575