// SPDX-License-Identifier: GPL-2.0
/*
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "pasid.h"

/*
 * Intel IOMMU system wide PASID name space:
 */
u32 intel_pasid_max_id = PASID_MAX;

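/*
 * Virtual command interface for PASID allocation: a command is written to
 * the VCMD register and the VCRSP register is polled until the In Progress
 * bit clears, then the status code and (for allocation) the resulting PASID
 * are extracted from the response.
 */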
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	int ret = 0;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}

void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG,
		    VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		break;
	case VCMD_VRSP_SC_INVALID_PASID:
		pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
		break;
	default:
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}
}

/*
 * Per device pasid table management:
 */
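/*
 * Devices that alias to the same PCI requester ID share one pasid table;
 * each device_domain_info is linked into pasid_table->dev so the table is
 * only freed once the last user detaches.
 */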
static inline void
device_attach_pasid_table(struct device_domain_info *info,
			  struct pasid_table *pasid_table)
{
	info->pasid_table = pasid_table;
	list_add(&info->table, &pasid_table->dev);
}

static inline void
device_detach_pasid_table(struct device_domain_info *info,
			  struct pasid_table *pasid_table)
{
	info->pasid_table = NULL;
	list_del(&info->table);
}

struct pasid_table_opaque {
	struct pasid_table	**pasid_table;
	int			segment;
	int			bus;
	int			devfn;
};

static int search_pasid_table(struct device_domain_info *info, void *opaque)
{
	struct pasid_table_opaque *data = opaque;

	if (info->iommu->segment == data->segment &&
	    info->bus == data->bus &&
	    info->devfn == data->devfn &&
	    info->pasid_table) {
		*data->pasid_table = info->pasid_table;
		return 1;
	}

	return 0;
}

static int get_alias_pasid_table(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct pasid_table_opaque *data = opaque;

	data->segment = pci_domain_nr(pdev->bus);
	data->bus = PCI_BUS_NUM(alias);
	data->devfn = alias & 0xff;

	return for_each_device_domain(&search_pasid_table, data);
}

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_table_opaque data;
	struct page *pages;
	u32 max_pasid = 0;
	int ret, order;
	int size;

	might_sleep();
	info = get_domain_info(dev);
	if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
		return -EINVAL;

	/* DMA alias device already has a pasid table, use it: */
	data.pasid_table = &pasid_table;
	ret = pci_for_each_dma_alias(to_pci_dev(dev),
				     &get_alias_pasid_table, &data);
	if (ret)
		goto attach_out;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;
	INIT_LIST_HEAD(&pasid_table->dev);

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);

	/*
	 * Each PASID directory entry covers 2^PASID_PDE_SHIFT PASIDs and is
	 * 8 bytes wide, so the directory needs
	 * max_pasid >> (PASID_PDE_SHIFT - 3) bytes.
	 */
	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	pages = alloc_pages_node(info->iommu->node,
				 GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);

attach_out:
	device_attach_pasid_table(info, pasid_table);

	return 0;
}

void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *table;
	int i, max_pde;

	info = get_domain_info(dev);
	if (!info || !dev_is_pci(dev) || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	device_detach_pasid_table(info, pasid_table);

	if (!list_empty(&pasid_table->dev))
		return;

	/* Free scalable mode PASID directory tables: */
	dir = pasid_table->table;
	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
	for (i = 0; i < max_pde; i++) {
		table = get_pasid_table_from_pde(&dir[i]);
		free_pgtable_page(table);
	}

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}

struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = get_domain_info(dev);
	if (!info)
		return NULL;

	return info->pasid_table;
}

static int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = get_domain_info(dev);
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}

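/*
 * Look up the leaf PASID table entry for @pasid: the upper bits of the PASID
 * index the directory, the lower bits index the leaf table, and leaf tables
 * are allocated lazily on first use.
 */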
static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

	dir = pasid_table->table;
	info = get_domain_info(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

retry:
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		entries = alloc_pgtable_page(info->iommu->node);
		if (!entries)
			return NULL;

		/*
		 * The pasid directory table entry won't be freed after
		 * allocation. No worry about the race with free and
		 * clear. However, this entry might be populated by others
		 * while we are preparing it. Use theirs with a retry.
		 */
		if (cmpxchg64(&dir[dir_index].val, 0ULL,
			      (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
			free_pgtable_page(entries);
			goto retry;
		}
	}

	return &entries[index];
}

/*
 * Interfaces for PASID table entry manipulation:
 */
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	if (fault_ignore && pasid_pte_is_present(pe))
		pasid_clear_entry_with_fpd(pe);
	else
		pasid_clear_entry(pe);
}

static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}

/*
 * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}

/*
 * Setup the WPE(Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}

/*
 * Setup the P(Present) field (Bit 0) of a scalable mode PASID entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the First Level Page table Pointer field (Bit 140~191) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

/*
 * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_eafe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}

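/*
 * Issue a PASID-selective PASID cache invalidation for @did/@pasid through
 * the invalidation queue.
 */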
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = get_domain_info(dev);
	if (!info || !info->ats_enabled)
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/*
	 * When PASID 0 is used, it indicates RID2PASID(DMA request w/o PASID),
	 * devTLB flush w/o PASID should be used. For non-zero PASID under
	 * SVA usage, device could do DMA with multiple PASIDs. It is more
	 * efficient to flush devTLB specific to the PASID.
	 */
	if (pasid == PASID_RID2PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}

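/*
 * Tear down a PASID table entry: clear it (optionally leaving FPD set when
 * faults should be ignored) and then invalidate the PASID cache, the IOTLB
 * and, outside of caching mode, the device TLB.
 */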
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return;

	if (!pasid_pte_is_present(pte))
		return;

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);

	intel_pasid_clear_entry(dev, pasid, fault_ignore);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}

/*
 * This function flushes cache for a newly setup pasid table entry.
 * Caller of it should not modify the in-use pasid table entries.
 */
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}

static inline int pasid_enable_wpe(struct pasid_entry *pte)
{
#ifdef CONFIG_X86
	unsigned long cr0 = read_cr0();

	/* CR0.WP is normally set but just to be sure */
	if (unlikely(!(cr0 & X86_CR0_WP))) {
		pr_err_ratelimited("No CPU write protect!\n");
		return -EINVAL;
	}
#endif
	pasid_set_wpe(pte);

	return 0;
}

/*
 * Set up the scalable mode pasid table entry for first only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return -EINVAL;

	/* Caller must ensure PASID entry is not in use. */
	if (pasid_pte_is_present(pte))
		return -EBUSY;

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, (u64)__pa(pgd));
	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap)) {
			pr_err("No supervisor request support on %s\n",
			       iommu->name);
			return -EINVAL;
		}
		pasid_set_sre(pte);
		if (pasid_enable_wpe(pte))
			return -EINVAL;

	}

	if (flags & PASID_FLAG_FL5LP) {
		if (cap_5lp_support(iommu->cap)) {
			pasid_set_flpm(pte, 1);
		} else {
			pr_err("No 5-level paging support for first-level\n");
			pasid_clear_entry(pte);
			return -EINVAL;
		}
	}

	if (flags & PASID_FLAG_PAGE_SNOOP)
		pasid_set_pgsnp(pte);

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/* Setup Present and PASID Granular Transfer Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Skip top levels of page tables for iommu which has less agaw
 * than default. Unnecessary for PT mode.
 */
static inline int iommu_skip_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu,
				  struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}

/*
 * Set up the scalable mode pasid entry for second only translation type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	int agaw;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err(dev, "Invalid domain page table\n");
		return -EINVAL;
	}

	pgd_val = virt_to_phys(pgd);
	did = domain->iommu_did[iommu->seq_id];

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
		return -ENODEV;
	}

	/* Caller must ensure PASID entry is not in use. */
	if (pasid_pte_is_present(pte))
		return -EBUSY;

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
		pasid_set_pgsnp(pte);

	/*
	 * Since it is a second level only translation setup, we should
	 * set SRE bit as well (addresses are expected to be GPAs).
	 */
	if (pasid != PASID_RID2PASID)
		pasid_set_sre(pte);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
		return -ENODEV;
	}

	/* Caller must ensure PASID entry is not in use. */
	if (pasid_pte_is_present(pte))
		return -EBUSY;

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * We should set SRE bit as well since the addresses are expected
	 * to be GPAs.
	 */
	pasid_set_sre(pte);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

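/*
 * Apply the guest-provided bind flags (supervisor requests, write protect,
 * extended access flag) to the PASID entry, rejecting anything the IOMMU
 * does not support.
 */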
static int
intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
			    struct iommu_gpasid_bind_data_vtd *pasid_data)
{
	/*
	 * Not all guest PASID table entry fields are passed down during bind,
	 * here we only set up the ones that are dependent on guest settings.
	 * Execution related bits such as NXE, SMEP are not supported.
	 * Other fields, such as snoop related, are set based on host needs
	 * regardless of guest settings.
	 */
	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
		if (!ecap_srs(iommu->ecap)) {
			pr_err_ratelimited("No supervisor request support on %s\n",
					   iommu->name);
			return -EINVAL;
		}
		pasid_set_sre(pte);
		/* Enable write protect WP if guest requested */
		if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE)
			pasid_set_wpe(pte);
	}

	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
		if (!ecap_eafs(iommu->ecap)) {
			pr_err_ratelimited("No extended access flag support on %s\n",
					   iommu->name);
			return -EINVAL;
		}
		pasid_set_eafe(pte);
	}

	/*
	 * Memory type is only applicable to devices inside processor coherent
	 * domain. Will add MTS support once coherent devices are available.
	 */
	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) {
		pr_warn_ratelimited("No memory type support %s\n",
				    iommu->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
 * This could be used for guest shared virtual address. In this case, the
 * first level page tables are used for GVA-GPA translation in the guest,
 * second level page tables are used for GPA-HPA translation.
 *
 * @iommu:      IOMMU which the device belong to
 * @dev:        Device to be set up for translation
 * @gpgd:       FLPTPTR: First Level Page translation pointer in GPA
 * @pasid:      PASID to be programmed in the device PASID table
 * @pasid_data: Additional PASID info from the guest bind request
 * @domain:     Domain info for setting up second level page tables
 * @addr_width: Address width of the first level (guest)
 */
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
			     pgd_t *gpgd, u32 pasid,
			     struct iommu_gpasid_bind_data_vtd *pasid_data,
			     struct dmar_domain *domain, int addr_width)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	int ret = 0;
	u64 pgd_val;
	int agaw;
	u16 did;

	if (!ecap_nest(iommu->ecap)) {
		pr_err_ratelimited("IOMMU: %s: No nested translation support\n",
				   iommu->name);
		return -EINVAL;
	}

	if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) {
		pr_err_ratelimited("Domain is not in nesting mode, %x\n",
				   domain->flags);
		return -EINVAL;
	}

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return -EINVAL;

	/*
	 * Caller must ensure PASID entry is not in use, i.e. not bind the
	 * same PASID to the same device twice.
	 */
	if (pasid_pte_is_present(pte))
		return -EBUSY;

	pasid_clear_entry(pte);

	/*
	 * Sanity checking performed by caller to make sure address
	 * width matching in two dimensions:
	 * 1. CPU vs. IOMMU
	 * 2. Guest vs. Host.
	 */
	switch (addr_width) {
#ifdef CONFIG_X86
	case ADDR_WIDTH_5LEVEL:
		if (!cpu_feature_enabled(X86_FEATURE_LA57) ||
		    !cap_5lp_support(iommu->cap)) {
			dev_err_ratelimited(dev,
					    "5-level paging not supported\n");
			return -EINVAL;
		}

		pasid_set_flpm(pte, 1);
		break;
#endif
	case ADDR_WIDTH_4LEVEL:
		pasid_set_flpm(pte, 0);
		break;
	default:
		dev_err_ratelimited(dev, "Invalid guest address width %d\n",
				    addr_width);
		return -EINVAL;
	}

	/* First level PGD is in GPA, must be supported by the second level */
	if ((uintptr_t)gpgd > domain->max_addr) {
		dev_err_ratelimited(dev,
				    "Guest PGD %lx not supported, max %llx\n",
				    (uintptr_t)gpgd, domain->max_addr);
		return -EINVAL;
	}
	pasid_set_flptr(pte, (uintptr_t)gpgd);

	ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
	if (ret)
		return ret;

	/* Setup the second level based on the given domain */
	pgd = domain->pgd;

	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err_ratelimited(dev, "Invalid domain page table\n");
		return -EINVAL;
	}
	pgd_val = virt_to_phys(pgd);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_fault_enable(pte);

	did = domain->iommu_did[iommu->seq_id];
	pasid_set_domain_id(pte, did);

	pasid_set_address_width(pte, agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return ret;
}