/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 *   (DMA Remapping device)
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "kvm_i386.h"
#include "trace.h"
39
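/*
 * Register read/write helpers.
 *
 * The MMIO register file is modelled with four byte arrays: csr holds the
 * current register values, wmask marks the bits the guest may write,
 * w1cmask marks "write 1 to clear" bits, and womask marks write-only bits
 * that read back as zero.  Guest writes go through vtd_set_quad()/
 * vtd_set_long(), which apply the masks as:
 *
 *   new = ((old & ~wmask) | (val & wmask)) & ~(w1cmask & val);
 *
 * The *_raw variants bypass the masks for internal updates by the
 * emulation itself.
 */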
40static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
41 uint64_t wmask, uint64_t w1cmask)
42{
43 stq_le_p(&s->csr[addr], val);
44 stq_le_p(&s->wmask[addr], wmask);
45 stq_le_p(&s->w1cmask[addr], w1cmask);
46}
47
48static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
49{
50 stq_le_p(&s->womask[addr], mask);
51}
52
53static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
54 uint32_t wmask, uint32_t w1cmask)
55{
56 stl_le_p(&s->csr[addr], val);
57 stl_le_p(&s->wmask[addr], wmask);
58 stl_le_p(&s->w1cmask[addr], w1cmask);
59}
60
61static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
62{
63 stl_le_p(&s->womask[addr], mask);
64}
65
66
67static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
68{
69 uint64_t oldval = ldq_le_p(&s->csr[addr]);
70 uint64_t wmask = ldq_le_p(&s->wmask[addr]);
71 uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
72 stq_le_p(&s->csr[addr],
73 ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
74}
75
76static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
77{
78 uint32_t oldval = ldl_le_p(&s->csr[addr]);
79 uint32_t wmask = ldl_le_p(&s->wmask[addr]);
80 uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
81 stl_le_p(&s->csr[addr],
82 ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
83}
84
85static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
86{
87 uint64_t val = ldq_le_p(&s->csr[addr]);
88 uint64_t womask = ldq_le_p(&s->womask[addr]);
89 return val & ~womask;
90}
91
92static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
93{
94 uint32_t val = ldl_le_p(&s->csr[addr]);
95 uint32_t womask = ldl_le_p(&s->womask[addr]);
96 return val & ~womask;
97}
98
99
100static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
101{
102 return ldq_le_p(&s->csr[addr]);
103}
104
105static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
106{
107 return ldl_le_p(&s->csr[addr]);
108}
109
110static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
111{
112 stq_le_p(&s->csr[addr], val);
113}
114
115static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
116 uint32_t clear, uint32_t mask)
117{
118 uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
119 stl_le_p(&s->csr[addr], new_val);
120 return new_val;
121}
122
123static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
124 uint64_t clear, uint64_t mask)
125{
126 uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
127 stq_le_p(&s->csr[addr], new_val);
128 return new_val;
129}
130
131
132static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
133{
134 return *((const uint64_t *)v1) == *((const uint64_t *)v2);
135}
136
137static guint vtd_uint64_hash(gconstpointer v)
138{
139 return (guint)*(const uint64_t *)v;
140}
141
142static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
143 gpointer user_data)
144{
145 VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
146 uint16_t domain_id = *(uint16_t *)user_data;
147 return entry->domain_id == domain_id;
148}
149
150
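/*
 * Second-level paging helpers.  Each level of the second-level page table
 * resolves VTD_SL_LEVEL_BITS of the IOVA above the 4K page offset; with
 * the usual 9 bits per level this means level 1 maps 4K pages (shift 12),
 * level 2 maps 2M pages (shift 21) and level 3 maps 1G pages (shift 30).
 */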
151static inline uint32_t vtd_slpt_level_shift(uint32_t level)
152{
153 assert(level != 0);
154 return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
155}
156
157static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
158{
159 return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
160}
161
162static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
163 gpointer user_data)
164{
165 VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
166 VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
167 uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
168 uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
169 return (entry->domain_id == info->domain_id) &&
170 (((entry->gfn & info->mask) == gfn) ||
171 (entry->gfn == gfn_tlb));
172}
173
174
175
176
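/*
 * Reset the cached context entries of every VTDAddressSpace.  Context
 * cache invalidation works by bumping a global generation number; here the
 * per-device cached generations are cleared to 0 and the global generation
 * restarts from 1, so any stale cached entry is refetched on the next
 * translation.
 */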
177static void vtd_reset_context_cache(IntelIOMMUState *s)
178{
179 VTDAddressSpace *vtd_as;
180 VTDBus *vtd_bus;
181 GHashTableIter bus_it;
182 uint32_t devfn_it;
183
184 trace_vtd_context_cache_reset();
185
186 g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);
187
188 while (g_hash_table_iter_next (&bus_it, NULL, (void**)&vtd_bus)) {
189 for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
190 vtd_as = vtd_bus->dev_as[devfn_it];
191 if (!vtd_as) {
192 continue;
193 }
194 vtd_as->context_cache_entry.context_cache_gen = 0;
195 }
196 }
197 s->context_cache_gen = 1;
198}
199
200static void vtd_reset_iotlb(IntelIOMMUState *s)
201{
202 assert(s->iotlb);
203 g_hash_table_remove_all(s->iotlb);
204}
205
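/*
 * IOTLB entries are kept in a hash table keyed by a 64-bit value that
 * packs the guest frame number, the requester's source-id and the page
 * table level:
 *
 *   key = gfn | (sid << VTD_IOTLB_SID_SHIFT) | (level << VTD_IOTLB_LVL_SHIFT);
 *
 * so lookups can hit cached translations of any supported page size.
 */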
206static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
207 uint32_t level)
208{
209 return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
210 ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
211}
212
213static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
214{
215 return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
216}
217
218static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
219 hwaddr addr)
220{
221 VTDIOTLBEntry *entry;
222 uint64_t key;
223 int level;
224
225 for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
226 key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
227 source_id, level);
228 entry = g_hash_table_lookup(s->iotlb, &key);
229 if (entry) {
230 goto out;
231 }
232 }
233
234out:
235 return entry;
236}
237
238static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
239 uint16_t domain_id, hwaddr addr, uint64_t slpte,
240 uint8_t access_flags, uint32_t level)
241{
242 VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
243 uint64_t *key = g_malloc(sizeof(*key));
244 uint64_t gfn = vtd_get_iotlb_gfn(addr, level);
245
246 trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
247 if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
248 trace_vtd_iotlb_reset("iotlb exceeds size limit");
249 vtd_reset_iotlb(s);
250 }
251
252 entry->gfn = gfn;
253 entry->domain_id = domain_id;
254 entry->slpte = slpte;
255 entry->access_flags = access_flags;
256 entry->mask = vtd_slpt_level_page_mask(level);
257 *key = vtd_get_iotlb_key(gfn, source_id, level);
258 g_hash_table_replace(s->iotlb, key, entry);
259}
260
261
262
263
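/*
 * Deliver an MSI using the address/data pair the guest programmed into the
 * given registers (e.g. DMAR_FEADDR_REG/DMAR_FEDATA_REG for fault events),
 * via the APIC's send_msi hook.
 */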
264static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
265 hwaddr mesg_data_reg)
266{
267 MSIMessage msi;
268
269 assert(mesg_data_reg < DMAR_REG_SIZE);
270 assert(mesg_addr_reg < DMAR_REG_SIZE);
271
272 msi.address = vtd_get_long_raw(s, mesg_addr_reg);
273 msi.data = vtd_get_long_raw(s, mesg_data_reg);
274
275 trace_vtd_irq_generate(msi.address, msi.data);
276
277 apic_get_class()->send_msi(&msi);
278}
279
280
281
282
283
284static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
285{
286 if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
287 pre_fsts & VTD_FSTS_IQE) {
288 trace_vtd_err("There are previous interrupt conditions "
289 "to be serviced by software, fault event "
290 "is not generated.");
291 return;
292 }
293 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
294 if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
295 trace_vtd_err("Interrupt Mask set, irq is not generated.");
296 } else {
297 vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
298 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
299 }
300}
301
302
303
304
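/*
 * Fault recording registers are 128 bits each, starting at
 * DMAR_FRCD_REG_OFFSET; the Fault (F) bit lives in the upper 64 bits,
 * hence the "+ 8" below when only that half is accessed.
 */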
305static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
306{
307
308 hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
309 addr += 8;
310
311 assert(index < DMAR_FRCD_REG_NR);
312
313 return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
314}
315
316
317
318
319
320static void vtd_update_fsts_ppf(IntelIOMMUState *s)
321{
322 uint32_t i;
323 uint32_t ppf_mask = 0;
324
325 for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
326 if (vtd_is_frcd_set(s, i)) {
327 ppf_mask = VTD_FSTS_PPF;
328 break;
329 }
330 }
331 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
332 trace_vtd_fsts_ppf(!!ppf_mask);
333}
334
335static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
336{
337
338 hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
339 addr += 8;
340
341 assert(index < DMAR_FRCD_REG_NR);
342
343 vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
344 vtd_update_fsts_ppf(s);
345}
346
347
348static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
349 uint16_t source_id, hwaddr addr,
350 VTDFaultReason fault, bool is_write)
351{
352 uint64_t hi = 0, lo;
353 hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
354
355 assert(index < DMAR_FRCD_REG_NR);
356
357 lo = VTD_FRCD_FI(addr);
358 hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
359 if (!is_write) {
360 hi |= VTD_FRCD_T;
361 }
362 vtd_set_quad_raw(s, frcd_reg_addr, lo);
363 vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);
364
365 trace_vtd_frr_new(index, hi, lo);
366}
367
368
369static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
370{
371 uint32_t i;
372 uint64_t frcd_reg;
373 hwaddr addr = DMAR_FRCD_REG_OFFSET + 8;
374
375 for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
376 frcd_reg = vtd_get_quad_raw(s, addr);
377 if ((frcd_reg & VTD_FRCD_F) &&
378 ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
379 return true;
380 }
381 addr += 16;
382 }
383 return false;
384}
385
386
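/*
 * Record a DMA remapping fault.  The fault is dropped if the Primary Fault
 * Overflow (PFO) bit is already set or an unserviced fault from the same
 * source-id is pending (fault collapsing); otherwise it is written into
 * the next free fault recording register and, if no faults were pending
 * before, a fault event interrupt is generated.
 */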
387static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
388 hwaddr addr, VTDFaultReason fault,
389 bool is_write)
390{
391 uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
392
393 assert(fault < VTD_FR_MAX);
394
395 if (fault == VTD_FR_RESERVED_ERR) {
396
397 return;
398 }
399
400 trace_vtd_dmar_fault(source_id, fault, addr, is_write);
401
402 if (fsts_reg & VTD_FSTS_PFO) {
403 trace_vtd_err("New fault is not recorded due to "
404 "Primary Fault Overflow.");
405 return;
406 }
407
408 if (vtd_try_collapse_fault(s, source_id)) {
409 trace_vtd_err("New fault is not recorded due to "
410 "compression of faults.");
411 return;
412 }
413
414 if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
415 trace_vtd_err("Next Fault Recording Reg is used, "
416 "new fault is not recorded, set PFO field.");
417 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
418 return;
419 }
420
421 vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);
422
423 if (fsts_reg & VTD_FSTS_PPF) {
424 trace_vtd_err("There are pending faults already, "
425 "fault event is not generated.");
426 vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
427 s->next_frcd_reg++;
428 if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
429 s->next_frcd_reg = 0;
430 }
431 } else {
432 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
433 VTD_FSTS_FRI(s->next_frcd_reg));
434 vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
435 s->next_frcd_reg++;
436 if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
437 s->next_frcd_reg = 0;
438 }
439
440
441
442 vtd_generate_fault_event(s, fsts_reg);
443 }
444}
445
446
447
448
449static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
450{
451 uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
452
453 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
454 vtd_generate_fault_event(s, fsts_reg);
455}
456
457
458static void vtd_generate_completion_event(IntelIOMMUState *s)
459{
460 if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
461 trace_vtd_inv_desc_wait_irq("One pending, skip current");
462 return;
463 }
464 vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
465 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
466 if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
467 trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
468 "new event not generated");
469 return;
470 } else {
471
472 trace_vtd_inv_desc_wait_irq("Generating complete event");
473 vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
474 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
475 }
476}
477
478static inline bool vtd_root_entry_present(VTDRootEntry *root)
479{
480 return root->val & VTD_ROOT_ENTRY_P;
481}
482
483static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
484 VTDRootEntry *re)
485{
486 dma_addr_t addr;
487
488 addr = s->root + index * sizeof(*re);
489 if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
490 trace_vtd_re_invalid(re->rsvd, re->val);
491 re->val = 0;
492 return -VTD_FR_ROOT_TABLE_INV;
493 }
494 re->val = le64_to_cpu(re->val);
495 return 0;
496}
497
498static inline bool vtd_ce_present(VTDContextEntry *context)
499{
500 return context->lo & VTD_CONTEXT_ENTRY_P;
501}
502
503static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
504 VTDContextEntry *ce)
505{
506 dma_addr_t addr;
507
508
509 addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
510 if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
511 trace_vtd_re_invalid(root->rsvd, root->val);
512 return -VTD_FR_CONTEXT_TABLE_INV;
513 }
514 ce->lo = le64_to_cpu(ce->lo);
515 ce->hi = le64_to_cpu(ce->hi);
516 return 0;
517}
518
519static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce)
520{
521 return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
522}
523
524static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
525{
526 return slpte & VTD_SL_PT_BASE_ADDR_MASK;
527}
528
529
530static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
531{
532 return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
533}
534
535
536static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
537{
538 uint64_t slpte;
539
540 assert(index < VTD_SL_PT_ENTRY_NR);
541
542 if (dma_memory_read(&address_space_memory,
543 base_addr + index * sizeof(slpte), &slpte,
544 sizeof(slpte))) {
545 slpte = (uint64_t)-1;
546 return slpte;
547 }
548 slpte = le64_to_cpu(slpte);
549 return slpte;
550}
551
552
553
554
555static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
556{
557 return (iova >> vtd_slpt_level_shift(level)) &
558 ((1ULL << VTD_SL_LEVEL_BITS) - 1);
559}
560
561
562static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
563{
564 return VTD_CAP_SAGAW_MASK & s->cap &
565 (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
566}
567
568
569
570
571static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce)
572{
573 return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
574}
575
576static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce)
577{
578 return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
579}
580
581static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce)
582{
583 return ce->lo & VTD_CONTEXT_ENTRY_TT;
584}
585
586
587static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
588 VTDContextEntry *ce)
589{
590 switch (vtd_ce_get_type(ce)) {
591 case VTD_CONTEXT_TT_MULTI_LEVEL:
592
593 break;
594 case VTD_CONTEXT_TT_DEV_IOTLB:
595 if (!x86_iommu->dt_supported) {
596 return false;
597 }
598 break;
599 case VTD_CONTEXT_TT_PASS_THROUGH:
600 if (!x86_iommu->pt_supported) {
601 return false;
602 }
603 break;
604 default:
605
606 return false;
607 }
608 return true;
609}
610
611static inline uint64_t vtd_iova_limit(VTDContextEntry *ce)
612{
613 uint32_t ce_agaw = vtd_ce_get_agaw(ce);
614 return 1ULL << MIN(ce_agaw, VTD_MGAW);
615}
616
617
618static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce)
619{
620
621
622
623
624 return !(iova & ~(vtd_iova_limit(ce) - 1));
625}
626
627static const uint64_t vtd_paging_entry_rsvd_field[] = {
628 [0] = ~0ULL,
629
630 [1] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
631 [2] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
632 [3] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
633 [4] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
634
635 [5] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
636 [6] = 0x1ff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
637 [7] = 0x3ffff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
638 [8] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
639};
640
641static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
642{
643 if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
644
645 return slpte & vtd_paging_entry_rsvd_field[level + 4];
646 } else {
647 return slpte & vtd_paging_entry_rsvd_field[level];
648 }
649}
650
651
652static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
653{
654 VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
655 if (!vtd_bus) {
656
657
658
659
660
661 GHashTableIter iter;
662
663 g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
664 while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
665 if (pci_bus_num(vtd_bus->bus) == bus_num) {
666 s->vtd_as_by_bus_num[bus_num] = vtd_bus;
667 return vtd_bus;
668 }
669 }
670 }
671 return vtd_bus;
672}
673
674
675
676
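/*
 * Walk the second-level page table rooted at the context entry to resolve
 * @iova into a leaf SLPTE, checking the requested access right and the
 * reserved bits at every level.  On success *slptep and *slpte_level are
 * filled in and 0 is returned; otherwise a negative VTD_FR_* fault reason
 * is returned.
 */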
677static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
678 uint64_t *slptep, uint32_t *slpte_level,
679 bool *reads, bool *writes)
680{
681 dma_addr_t addr = vtd_ce_get_slpt_base(ce);
682 uint32_t level = vtd_ce_get_level(ce);
683 uint32_t offset;
684 uint64_t slpte;
685 uint64_t access_right_check;
686
687 if (!vtd_iova_range_check(iova, ce)) {
688 trace_vtd_err_dmar_iova_overflow(iova);
689 return -VTD_FR_ADDR_BEYOND_MGAW;
690 }
691
692
693 access_right_check = is_write ? VTD_SL_W : VTD_SL_R;
694
695 while (true) {
696 offset = vtd_iova_level_offset(iova, level);
697 slpte = vtd_get_slpte(addr, offset);
698
699 if (slpte == (uint64_t)-1) {
700 trace_vtd_err_dmar_slpte_read_error(iova, level);
701 if (level == vtd_ce_get_level(ce)) {
702
703 return -VTD_FR_CONTEXT_ENTRY_INV;
704 } else {
705 return -VTD_FR_PAGING_ENTRY_INV;
706 }
707 }
708 *reads = (*reads) && (slpte & VTD_SL_R);
709 *writes = (*writes) && (slpte & VTD_SL_W);
710 if (!(slpte & access_right_check)) {
711 trace_vtd_err_dmar_slpte_perm_error(iova, level, slpte, is_write);
712 return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
713 }
714 if (vtd_slpte_nonzero_rsvd(slpte, level)) {
715 trace_vtd_err_dmar_slpte_resv_error(iova, level, slpte);
716 return -VTD_FR_PAGING_ENTRY_RSVD;
717 }
718
719 if (vtd_is_last_slpte(slpte, level)) {
720 *slptep = slpte;
721 *slpte_level = level;
722 return 0;
723 }
724 addr = vtd_get_slpte_addr(slpte);
725 level--;
726 }
727}
728
729typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
/*
 * vtd_page_walk_level - walk one level of the second-level page table for
 *                       an IOVA range and call @hook_fn on each leaf entry
 *
 * @addr: guest physical address of the page table for this level
 * @start: IOVA range start address
 * @end: IOVA range end address (exclusive)
 * @hook_fn: hook called for each leaf mapping found
 * @private: private data passed to @hook_fn
 * @level: the page table level being walked
 * @read: whether the parent level grants read permission
 * @write: whether the parent level grants write permission
 * @notify_unmap: whether entries without valid permissions are also notified
 */
743static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
744 uint64_t end, vtd_page_walk_hook hook_fn,
745 void *private, uint32_t level,
746 bool read, bool write, bool notify_unmap)
747{
748 bool read_cur, write_cur, entry_valid;
749 uint32_t offset;
750 uint64_t slpte;
751 uint64_t subpage_size, subpage_mask;
752 IOMMUTLBEntry entry;
753 uint64_t iova = start;
754 uint64_t iova_next;
755 int ret = 0;
756
757 trace_vtd_page_walk_level(addr, level, start, end);
758
759 subpage_size = 1ULL << vtd_slpt_level_shift(level);
760 subpage_mask = vtd_slpt_level_page_mask(level);
761
762 while (iova < end) {
763 iova_next = (iova & subpage_mask) + subpage_size;
764
765 offset = vtd_iova_level_offset(iova, level);
766 slpte = vtd_get_slpte(addr, offset);
767
768 if (slpte == (uint64_t)-1) {
769 trace_vtd_page_walk_skip_read(iova, iova_next);
770 goto next;
771 }
772
773 if (vtd_slpte_nonzero_rsvd(slpte, level)) {
774 trace_vtd_page_walk_skip_reserve(iova, iova_next);
775 goto next;
776 }
777
778
779 read_cur = read && (slpte & VTD_SL_R);
780 write_cur = write && (slpte & VTD_SL_W);
781
782
783
784
785
786
787 entry_valid = read_cur | write_cur;
788
789 if (vtd_is_last_slpte(slpte, level)) {
790 entry.target_as = &address_space_memory;
791 entry.iova = iova & subpage_mask;
792
793 entry.translated_addr = vtd_get_slpte_addr(slpte);
794 entry.addr_mask = ~subpage_mask;
795 entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
796 if (!entry_valid && !notify_unmap) {
797 trace_vtd_page_walk_skip_perm(iova, iova_next);
798 goto next;
799 }
800 trace_vtd_page_walk_one(level, entry.iova, entry.translated_addr,
801 entry.addr_mask, entry.perm);
802 if (hook_fn) {
803 ret = hook_fn(&entry, private);
804 if (ret < 0) {
805 return ret;
806 }
807 }
808 } else {
809 if (!entry_valid) {
810 trace_vtd_page_walk_skip_perm(iova, iova_next);
811 goto next;
812 }
813 ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte), iova,
814 MIN(iova_next, end), hook_fn, private,
815 level - 1, read_cur, write_cur,
816 notify_unmap);
817 if (ret < 0) {
818 return ret;
819 }
820 }
821
822next:
823 iova = iova_next;
824 }
825
826 return 0;
827}
828
/*
 * vtd_page_walk - walk an IOVA range within a context entry and call
 *                 @hook_fn for each mapping that is found
 *
 * @ce: context entry to walk upon
 * @start: IOVA address where the walk starts
 * @end: IOVA range end address (exclusive); clamped to the IOVA limit
 * @hook_fn: hook called for each detected mapping
 * @private: private data for @hook_fn
 * @notify_unmap: whether invalid entries should also be notified
 */
838static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
839 vtd_page_walk_hook hook_fn, void *private,
840 bool notify_unmap)
841{
842 dma_addr_t addr = vtd_ce_get_slpt_base(ce);
843 uint32_t level = vtd_ce_get_level(ce);
844
845 if (!vtd_iova_range_check(start, ce)) {
846 return -VTD_FR_ADDR_BEYOND_MGAW;
847 }
848
849 if (!vtd_iova_range_check(end, ce)) {
850
851 end = vtd_iova_limit(ce);
852 }
853
854 return vtd_page_walk_level(addr, start, end, hook_fn, private,
855 level, true, true, notify_unmap);
856}
857
858
859static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
860 uint8_t devfn, VTDContextEntry *ce)
861{
862 VTDRootEntry re;
863 int ret_fr;
864 X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
865
866 ret_fr = vtd_get_root_entry(s, bus_num, &re);
867 if (ret_fr) {
868 return ret_fr;
869 }
870
871 if (!vtd_root_entry_present(&re)) {
872
873 trace_vtd_re_not_present(bus_num);
874 return -VTD_FR_ROOT_ENTRY_P;
875 }
876
877 if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
878 trace_vtd_re_invalid(re.rsvd, re.val);
879 return -VTD_FR_ROOT_ENTRY_RSVD;
880 }
881
882 ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
883 if (ret_fr) {
884 return ret_fr;
885 }
886
887 if (!vtd_ce_present(ce)) {
888
889 trace_vtd_ce_not_present(bus_num, devfn);
890 return -VTD_FR_CONTEXT_ENTRY_P;
891 }
892
893 if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
894 (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
895 trace_vtd_ce_invalid(ce->hi, ce->lo);
896 return -VTD_FR_CONTEXT_ENTRY_RSVD;
897 }
898
899
900 if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
901 trace_vtd_ce_invalid(ce->hi, ce->lo);
902 return -VTD_FR_CONTEXT_ENTRY_INV;
903 }
904
905
906 if (!vtd_ce_type_check(x86_iommu, ce)) {
907 trace_vtd_ce_invalid(ce->hi, ce->lo);
908 return -VTD_FR_CONTEXT_ENTRY_INV;
909 }
910
911 return 0;
912}
913
914
915
916
917
918
919static int vtd_dev_get_trans_type(VTDAddressSpace *as)
920{
921 IntelIOMMUState *s;
922 VTDContextEntry ce;
923 int ret;
924
925 s = as->iommu_state;
926
927 ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus),
928 as->devfn, &ce);
929 if (ret) {
930 return ret;
931 }
932
933 return vtd_ce_get_type(&ce);
934}
935
936static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
937{
938 int ret;
939
940 assert(as);
941
942 ret = vtd_dev_get_trans_type(as);
943 if (ret < 0) {
        /*
         * Reading the context entry failed (e.g. the guest has not set it
         * up yet, or it is malformed), so we cannot tell whether the
         * device is in pass-through mode; assume it is not.
         */
950 return false;
951 }
952
953 return ret == VTD_CONTEXT_TT_PASS_THROUGH;
954}
955
956
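/*
 * Switch a device between the IOMMU-translated address space and the
 * "sys_alias" passthrough alias of system memory.  DMA goes through the
 * IOMMU memory region only when DMAR is enabled and the device's context
 * entry is not in pass-through mode.  The BQL is taken if the caller does
 * not already hold it, since the memory region updates need it.
 */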
957static bool vtd_switch_address_space(VTDAddressSpace *as)
958{
959 bool use_iommu;
960
961 bool take_bql = !qemu_mutex_iothread_locked();
962
963 assert(as);
964
    use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as);
966
967 trace_vtd_switch_address_space(pci_bus_num(as->bus),
968 VTD_PCI_SLOT(as->devfn),
969 VTD_PCI_FUNC(as->devfn),
970 use_iommu);
971
972
973
974
975
976
977 if (take_bql) {
978 qemu_mutex_lock_iothread();
979 }
980
981
982 if (use_iommu) {
983 memory_region_set_enabled(&as->sys_alias, false);
984 memory_region_set_enabled(MEMORY_REGION(&as->iommu), true);
985 } else {
986 memory_region_set_enabled(MEMORY_REGION(&as->iommu), false);
987 memory_region_set_enabled(&as->sys_alias, true);
988 }
989
990 if (take_bql) {
991 qemu_mutex_unlock_iothread();
992 }
993
994 return use_iommu;
995}
996
997static void vtd_switch_address_space_all(IntelIOMMUState *s)
998{
999 GHashTableIter iter;
1000 VTDBus *vtd_bus;
1001 int i;
1002
1003 g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
1004 while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
1005 for (i = 0; i < X86_IOMMU_PCI_DEVFN_MAX; i++) {
1006 if (!vtd_bus->dev_as[i]) {
1007 continue;
1008 }
1009 vtd_switch_address_space(vtd_bus->dev_as[i]);
1010 }
1011 }
1012}
1013
1014static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
1015{
1016 return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
1017}
1018
1019static const bool vtd_qualified_faults[] = {
1020 [VTD_FR_RESERVED] = false,
1021 [VTD_FR_ROOT_ENTRY_P] = false,
1022 [VTD_FR_CONTEXT_ENTRY_P] = true,
1023 [VTD_FR_CONTEXT_ENTRY_INV] = true,
1024 [VTD_FR_ADDR_BEYOND_MGAW] = true,
1025 [VTD_FR_WRITE] = true,
1026 [VTD_FR_READ] = true,
1027 [VTD_FR_PAGING_ENTRY_INV] = true,
1028 [VTD_FR_ROOT_TABLE_INV] = false,
1029 [VTD_FR_CONTEXT_TABLE_INV] = false,
1030 [VTD_FR_ROOT_ENTRY_RSVD] = false,
1031 [VTD_FR_PAGING_ENTRY_RSVD] = true,
1032 [VTD_FR_CONTEXT_ENTRY_TT] = true,
1033 [VTD_FR_RESERVED_ERR] = false,
1034 [VTD_FR_MAX] = false,
1035};
1036
/*
 * A fault is "qualified" if it is only reported when the Fault Processing
 * Disable (FPD) field of the context entry is clear; non-qualified faults
 * are reported regardless of FPD.
 */
1041static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
1042{
1043 return vtd_qualified_faults[fault];
1044}
1045
1046static inline bool vtd_is_interrupt_addr(hwaddr addr)
1047{
1048 return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
1049}
1050
1051static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
1052{
1053 VTDBus *vtd_bus;
1054 VTDAddressSpace *vtd_as;
1055 bool success = false;
1056
1057 vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
1058 if (!vtd_bus) {
1059 goto out;
1060 }
1061
1062 vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)];
1063 if (!vtd_as) {
1064 goto out;
1065 }
1066
1067 if (vtd_switch_address_space(vtd_as) == false) {
1068
1069 success = true;
1070 }
1071
1072out:
1073 trace_vtd_pt_enable_fast_path(source_id, success);
1074}
1075
/*
 * Map the device to its context entry and then walk the second-level
 * paging structures to perform an IOMMU translation.
 *
 * @vtd_as: the VTDAddressSpace of the requesting device
 * @bus: the bus the device sits on
 * @devfn: the combined device and function number
 * @addr: the IOVA to be translated
 * @is_write: whether the access is a write
 * @entry: IOMMUTLBEntry filled with the translation result
 *
 * Returns true if the translation succeeded, otherwise false.
 */
1088static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
1089 uint8_t devfn, hwaddr addr, bool is_write,
1090 IOMMUTLBEntry *entry)
1091{
1092 IntelIOMMUState *s = vtd_as->iommu_state;
1093 VTDContextEntry ce;
1094 uint8_t bus_num = pci_bus_num(bus);
1095 VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
1096 uint64_t slpte, page_mask;
1097 uint32_t level;
1098 uint16_t source_id = vtd_make_source_id(bus_num, devfn);
1099 int ret_fr;
1100 bool is_fpd_set = false;
1101 bool reads = true;
1102 bool writes = true;
1103 uint8_t access_flags;
1104 VTDIOTLBEntry *iotlb_entry;
1105
    /*
     * Interrupt addresses are handled by a dedicated memory region, so a
     * translation request should never target that range.
     */
1110 assert(!vtd_is_interrupt_addr(addr));
1111
1112
1113 iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
1114 if (iotlb_entry) {
1115 trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
1116 iotlb_entry->domain_id);
1117 slpte = iotlb_entry->slpte;
1118 access_flags = iotlb_entry->access_flags;
1119 page_mask = iotlb_entry->mask;
1120 goto out;
1121 }
1122
1123
1124 if (cc_entry->context_cache_gen == s->context_cache_gen) {
1125 trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
1126 cc_entry->context_entry.lo,
1127 cc_entry->context_cache_gen);
1128 ce = cc_entry->context_entry;
1129 is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
1130 } else {
1131 ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
1132 is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
1133 if (ret_fr) {
1134 ret_fr = -ret_fr;
1135 if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
1136 trace_vtd_fault_disabled();
1137 } else {
1138 vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
1139 }
1140 goto error;
1141 }
1142
1143 trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
1144 cc_entry->context_cache_gen,
1145 s->context_cache_gen);
1146 cc_entry->context_entry = ce;
1147 cc_entry->context_cache_gen = s->context_cache_gen;
1148 }
1149
1150
1151
1152
1153
1154 if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
1155 entry->iova = addr & VTD_PAGE_MASK_4K;
1156 entry->translated_addr = entry->iova;
1157 entry->addr_mask = ~VTD_PAGE_MASK_4K;
1158 entry->perm = IOMMU_RW;
1159 trace_vtd_translate_pt(source_id, entry->iova);
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170 vtd_pt_enable_fast_path(s, source_id);
1171
1172 return true;
1173 }
1174
1175 ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
1176 &reads, &writes);
1177 if (ret_fr) {
1178 ret_fr = -ret_fr;
1179 if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
1180 trace_vtd_fault_disabled();
1181 } else {
1182 vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
1183 }
1184 goto error;
1185 }
1186
1187 page_mask = vtd_slpt_level_page_mask(level);
1188 access_flags = IOMMU_ACCESS_FLAG(reads, writes);
1189 vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
1190 access_flags, level);
1191out:
1192 entry->iova = addr & page_mask;
1193 entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
1194 entry->addr_mask = ~page_mask;
1195 entry->perm = access_flags;
1196 return true;
1197
1198error:
1199 entry->iova = 0;
1200 entry->translated_addr = 0;
1201 entry->addr_mask = 0;
1202 entry->perm = IOMMU_NONE;
1203 return false;
1204}
1205
1206static void vtd_root_table_setup(IntelIOMMUState *s)
1207{
1208 s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
1209 s->root_extended = s->root & VTD_RTADDR_RTT;
1210 s->root &= VTD_RTADDR_ADDR_MASK;
1211
1212 trace_vtd_reg_dmar_root(s->root, s->root_extended);
1213}
1214
1215static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
1216 uint32_t index, uint32_t mask)
1217{
1218 x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
1219}
1220
1221static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
1222{
1223 uint64_t value = 0;
1224 value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
1225 s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
1226 s->intr_root = value & VTD_IRTA_ADDR_MASK;
1227 s->intr_eime = value & VTD_IRTA_EIME;
1228
1229
1230 vtd_iec_notify_all(s, true, 0, 0);
1231
1232 trace_vtd_reg_ir_root(s->intr_root, s->intr_size);
1233}
1234
1235static void vtd_iommu_replay_all(IntelIOMMUState *s)
1236{
1237 IntelIOMMUNotifierNode *node;
1238
1239 QLIST_FOREACH(node, &s->notifiers_list, next) {
1240 memory_region_iommu_replay_all(&node->vtd_as->iommu);
1241 }
1242}
1243
1244static void vtd_context_global_invalidate(IntelIOMMUState *s)
1245{
1246 trace_vtd_inv_desc_cc_global();
1247 s->context_cache_gen++;
1248 if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
1249 vtd_reset_context_cache(s);
1250 }
1251 vtd_switch_address_space_all(s);
1252
1253
1254
1255
1256
1257
1258
1259 vtd_iommu_replay_all(s);
1260}
1261
/*
 * Device-selective context-cache invalidation: drop the cached context
 * entries of the devices selected by @source_id and @func_mask.
 */
1265static void vtd_context_device_invalidate(IntelIOMMUState *s,
1266 uint16_t source_id,
1267 uint16_t func_mask)
1268{
1269 uint16_t mask;
1270 VTDBus *vtd_bus;
1271 VTDAddressSpace *vtd_as;
1272 uint8_t bus_n, devfn;
1273 uint16_t devfn_it;
1274
1275 trace_vtd_inv_desc_cc_devices(source_id, func_mask);
1276
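    /*
     * Per the VT-d FM (function mask) encoding, the value selects how many
     * high-order function-number bits are ignored when matching devfns:
     * 0 ignores none (exact match), 1 ignores function bit 2, 2 ignores
     * bits 2:1, and 3 ignores bits 2:0.  The switch below builds the
     * corresponding devfn mask.
     */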
1277 switch (func_mask & 3) {
1278 case 0:
1279 mask = 0;
1280 break;
1281 case 1:
1282 mask = 4;
1283 break;
1284 case 2:
1285 mask = 6;
1286 break;
1287 case 3:
1288 mask = 7;
1289 break;
1290 }
1291 mask = ~mask;
1292
1293 bus_n = VTD_SID_TO_BUS(source_id);
1294 vtd_bus = vtd_find_as_from_bus_num(s, bus_n);
1295 if (vtd_bus) {
1296 devfn = VTD_SID_TO_DEVFN(source_id);
1297 for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
1298 vtd_as = vtd_bus->dev_as[devfn_it];
1299 if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
1300 trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
1301 VTD_PCI_FUNC(devfn_it));
1302 vtd_as->context_cache_entry.context_cache_gen = 0;
1303
1304
1305
1306
1307 vtd_switch_address_space(vtd_as);
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317 memory_region_iommu_replay_all(&vtd_as->iommu);
1318 }
1319 }
1320 }
1321}
1322
1323
1324
1325
1326
1327static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
1328{
1329 uint64_t caig;
1330 uint64_t type = val & VTD_CCMD_CIRG_MASK;
1331
1332 switch (type) {
1333 case VTD_CCMD_DOMAIN_INVL:
1334
1335 case VTD_CCMD_GLOBAL_INVL:
1336 caig = VTD_CCMD_GLOBAL_INVL_A;
1337 vtd_context_global_invalidate(s);
1338 break;
1339
1340 case VTD_CCMD_DEVICE_INVL:
1341 caig = VTD_CCMD_DEVICE_INVL_A;
1342 vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
1343 break;
1344
1345 default:
1346 trace_vtd_err("Context cache invalidate type error.");
1347 caig = 0;
1348 }
1349 return caig;
1350}
1351
1352static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
1353{
1354 trace_vtd_inv_desc_iotlb_global();
1355 vtd_reset_iotlb(s);
1356 vtd_iommu_replay_all(s);
1357}
1358
1359static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
1360{
1361 IntelIOMMUNotifierNode *node;
1362 VTDContextEntry ce;
1363 VTDAddressSpace *vtd_as;
1364
1365 trace_vtd_inv_desc_iotlb_domain(domain_id);
1366
1367 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
1368 &domain_id);
1369
1370 QLIST_FOREACH(node, &s->notifiers_list, next) {
1371 vtd_as = node->vtd_as;
1372 if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
1373 vtd_as->devfn, &ce) &&
1374 domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
1375 memory_region_iommu_replay_all(&vtd_as->iommu);
1376 }
1377 }
1378}
1379
1380static int vtd_page_invalidate_notify_hook(IOMMUTLBEntry *entry,
1381 void *private)
1382{
1383 memory_region_notify_iommu((IOMMUMemoryRegion *)private, *entry);
1384 return 0;
1385}
1386
1387static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
1388 uint16_t domain_id, hwaddr addr,
1389 uint8_t am)
1390{
1391 IntelIOMMUNotifierNode *node;
1392 VTDContextEntry ce;
1393 int ret;
1394
1395 QLIST_FOREACH(node, &(s->notifiers_list), next) {
1396 VTDAddressSpace *vtd_as = node->vtd_as;
1397 ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
1398 vtd_as->devfn, &ce);
1399 if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
1400 vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE,
1401 vtd_page_invalidate_notify_hook,
1402 (void *)&vtd_as->iommu, true);
1403 }
1404 }
1405}
1406
1407static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
1408 hwaddr addr, uint8_t am)
1409{
1410 VTDIOTLBPageInvInfo info;
1411
1412 trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);
1413
1414 assert(am <= VTD_MAMV);
1415 info.domain_id = domain_id;
1416 info.addr = addr;
1417 info.mask = ~((1 << am) - 1);
1418 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
1419 vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
1420}
1421
/*
 * Register-based IOTLB invalidation (triggered by a DMAR_IOTLB_REG write).
 * Returns the actual invalidation granularity to report back to the guest,
 * or 0 if the request was invalid.
 */
1426static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
1427{
1428 uint64_t iaig;
1429 uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
1430 uint16_t domain_id;
1431 hwaddr addr;
1432 uint8_t am;
1433
1434 switch (type) {
1435 case VTD_TLB_GLOBAL_FLUSH:
1436 iaig = VTD_TLB_GLOBAL_FLUSH_A;
1437 vtd_iotlb_global_invalidate(s);
1438 break;
1439
1440 case VTD_TLB_DSI_FLUSH:
1441 domain_id = VTD_TLB_DID(val);
1442 iaig = VTD_TLB_DSI_FLUSH_A;
1443 vtd_iotlb_domain_invalidate(s, domain_id);
1444 break;
1445
1446 case VTD_TLB_PSI_FLUSH:
1447 domain_id = VTD_TLB_DID(val);
1448 addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
1449 am = VTD_IVA_AM(addr);
1450 addr = VTD_IVA_ADDR(addr);
1451 if (am > VTD_MAMV) {
1452 trace_vtd_err("IOTLB PSI flush: address mask overflow.");
1453 iaig = 0;
1454 break;
1455 }
1456 iaig = VTD_TLB_PSI_FLUSH_A;
1457 vtd_iotlb_page_invalidate(s, domain_id, addr, am);
1458 break;
1459
1460 default:
1461 trace_vtd_err("IOTLB flush: invalid granularity.");
1462 iaig = 0;
1463 }
1464 return iaig;
1465}
1466
1467static void vtd_fetch_inv_desc(IntelIOMMUState *s);
1468
1469static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
1470{
1471 return s->qi_enabled && (s->iq_tail == s->iq_head) &&
1472 (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
1473}
1474
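/*
 * Handle Queued Invalidation enable/disable (GCMD.QIE).  On enable, the
 * queue base and size are latched from DMAR_IQA_REG; on disable, the head
 * pointer is reset only once the queue has been drained and the last
 * descriptor processed was an invalidation wait.
 */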
1475static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
1476{
1477 uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);
1478
1479 trace_vtd_inv_qi_enable(en);
1480
1481 if (en) {
1482 s->iq = iqa_val & VTD_IQA_IQA_MASK;
1483
1484 s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
1485 s->qi_enabled = true;
1486 trace_vtd_inv_qi_setup(s->iq, s->iq_size);
1487
1488 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);
1489
1490 if (s->iq_tail != 0) {
1491
1492
1493
1494
1495
1496 trace_vtd_warn_invalid_qi_tail(s->iq_tail);
1497 if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
1498 vtd_fetch_inv_desc(s);
1499 }
1500 }
1501 } else {
1502 if (vtd_queued_inv_disable_check(s)) {
1503
1504 vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
1505 s->iq_head = 0;
1506 s->qi_enabled = false;
1507
1508 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
1509 } else {
1510 trace_vtd_err_qi_disable(s->iq_head, s->iq_tail, s->iq_last_desc_type);
1511 }
1512 }
1513}
1514
1515
1516static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
1517{
1518 vtd_root_table_setup(s);
1519
1520 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
1521}
1522
1523
1524static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
1525{
1526 vtd_interrupt_remap_table_setup(s);
1527
1528 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
1529}
1530
1531
1532static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
1533{
1534 if (s->dmar_enabled == en) {
1535 return;
1536 }
1537
1538 trace_vtd_dmar_enable(en);
1539
1540 if (en) {
1541 s->dmar_enabled = true;
1542
1543 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
1544 } else {
1545 s->dmar_enabled = false;
1546
1547
1548 s->next_frcd_reg = 0;
1549
1550 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
1551 }
1552
1553 vtd_switch_address_space_all(s);
1554}
1555
1556
1557static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
1558{
1559 trace_vtd_ir_enable(en);
1560
1561 if (en) {
1562 s->intr_enabled = true;
1563
1564 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
1565 } else {
1566 s->intr_enabled = false;
1567
1568 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
1569 }
1570}
1571
1572
1573static void vtd_handle_gcmd_write(IntelIOMMUState *s)
1574{
1575 uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
1576 uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
1577 uint32_t changed = status ^ val;
1578
1579 trace_vtd_reg_write_gcmd(status, val);
1580 if (changed & VTD_GCMD_TE) {
1581
1582 vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
1583 }
1584 if (val & VTD_GCMD_SRTP) {
1585
1586 vtd_handle_gcmd_srtp(s);
1587 }
1588 if (changed & VTD_GCMD_QIE) {
1589
1590 vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
1591 }
1592 if (val & VTD_GCMD_SIRTP) {
1593
1594 vtd_handle_gcmd_sirtp(s);
1595 }
1596 if (changed & VTD_GCMD_IRE) {
1597
1598 vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
1599 }
1600}
1601
1602
1603static void vtd_handle_ccmd_write(IntelIOMMUState *s)
1604{
1605 uint64_t ret;
1606 uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);
1607
1608
1609 if (val & VTD_CCMD_ICC) {
1610 if (s->qi_enabled) {
1611 trace_vtd_err("Queued Invalidation enabled, "
1612 "should not use register-based invalidation");
1613 return;
1614 }
1615 ret = vtd_context_cache_invalidate(s, val);
1616
1617 vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
1618 ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
1619 ret);
1620 }
1621}
1622
1623
1624static void vtd_handle_iotlb_write(IntelIOMMUState *s)
1625{
1626 uint64_t ret;
1627 uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);
1628
1629
1630 if (val & VTD_TLB_IVT) {
1631 if (s->qi_enabled) {
1632 trace_vtd_err("Queued Invalidation enabled, "
1633 "should not use register-based invalidation.");
1634 return;
1635 }
1636 ret = vtd_iotlb_flush(s, val);
1637
1638 vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
1639 ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
1640 VTD_TLB_FLUSH_GRANU_MASK_A, ret);
1641 }
1642}
1643
1644
1645static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
1646 VTDInvDesc *inv_desc)
1647{
1648 dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
1649 if (dma_memory_read(&address_space_memory, addr, inv_desc,
1650 sizeof(*inv_desc))) {
1651 trace_vtd_err("Read INV DESC failed.");
1652 inv_desc->lo = 0;
1653 inv_desc->hi = 0;
1654 return false;
1655 }
1656 inv_desc->lo = le64_to_cpu(inv_desc->lo);
1657 inv_desc->hi = le64_to_cpu(inv_desc->hi);
1658 return true;
1659}
1660
1661static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
1662{
1663 if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
1664 (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
1665 trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
1666 return false;
1667 }
1668 if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
1669
1670 uint32_t status_data = (uint32_t)(inv_desc->lo >>
1671 VTD_INV_DESC_WAIT_DATA_SHIFT);
1672
1673 assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));
1674
1675
1676 dma_addr_t status_addr = inv_desc->hi;
1677 trace_vtd_inv_desc_wait_sw(status_addr, status_data);
1678 status_data = cpu_to_le32(status_data);
1679 if (dma_memory_write(&address_space_memory, status_addr, &status_data,
1680 sizeof(status_data))) {
1681 trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
1682 return false;
1683 }
1684 } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
1685
1686 vtd_generate_completion_event(s);
1687 } else {
1688 trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
1689 return false;
1690 }
1691 return true;
1692}
1693
1694static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
1695 VTDInvDesc *inv_desc)
1696{
1697 uint16_t sid, fmask;
1698
1699 if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
1700 trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
1701 return false;
1702 }
1703 switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
1704 case VTD_INV_DESC_CC_DOMAIN:
1705 trace_vtd_inv_desc_cc_domain(
1706 (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
1707
1708 case VTD_INV_DESC_CC_GLOBAL:
1709 vtd_context_global_invalidate(s);
1710 break;
1711
1712 case VTD_INV_DESC_CC_DEVICE:
1713 sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
1714 fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
1715 vtd_context_device_invalidate(s, sid, fmask);
1716 break;
1717
1718 default:
1719 trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
1720 return false;
1721 }
1722 return true;
1723}
1724
1725static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
1726{
1727 uint16_t domain_id;
1728 uint8_t am;
1729 hwaddr addr;
1730
1731 if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
1732 (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
1733 trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
1734 return false;
1735 }
1736
1737 switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
1738 case VTD_INV_DESC_IOTLB_GLOBAL:
1739 vtd_iotlb_global_invalidate(s);
1740 break;
1741
1742 case VTD_INV_DESC_IOTLB_DOMAIN:
1743 domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
1744 vtd_iotlb_domain_invalidate(s, domain_id);
1745 break;
1746
1747 case VTD_INV_DESC_IOTLB_PAGE:
1748 domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
1749 addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
1750 am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
1751 if (am > VTD_MAMV) {
1752 trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
1753 return false;
1754 }
1755 vtd_iotlb_page_invalidate(s, domain_id, addr, am);
1756 break;
1757
1758 default:
1759 trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
1760 return false;
1761 }
1762 return true;
1763}
1764
1765static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
1766 VTDInvDesc *inv_desc)
1767{
1768 trace_vtd_inv_desc_iec(inv_desc->iec.granularity,
1769 inv_desc->iec.index,
1770 inv_desc->iec.index_mask);
1771
1772 vtd_iec_notify_all(s, !inv_desc->iec.granularity,
1773 inv_desc->iec.index,
1774 inv_desc->iec.index_mask);
1775 return true;
1776}
1777
1778static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
1779 VTDInvDesc *inv_desc)
1780{
1781 VTDAddressSpace *vtd_dev_as;
1782 IOMMUTLBEntry entry;
1783 struct VTDBus *vtd_bus;
1784 hwaddr addr;
1785 uint64_t sz;
1786 uint16_t sid;
1787 uint8_t devfn;
1788 bool size;
1789 uint8_t bus_num;
1790
1791 addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
1792 sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
1793 devfn = sid & 0xff;
1794 bus_num = sid >> 8;
1795 size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);
1796
1797 if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
1798 (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
1799 trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
1800 return false;
1801 }
1802
1803 vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
1804 if (!vtd_bus) {
1805 goto done;
1806 }
1807
1808 vtd_dev_as = vtd_bus->dev_as[devfn];
1809 if (!vtd_dev_as) {
1810 goto done;
1811 }
1812
    /*
     * PCIe ATS invalidation size encoding: with the S bit clear a single
     * 4K page is invalidated; with S set, the number of consecutive
     * low-order address bits that are 1 (above the 4K offset) encodes the
     * range size (8K, 16K, 32K, ...), which is what cto64() computes below.
     */
1821 if (size) {
1822 sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
1823 addr &= ~(sz - 1);
1824 } else {
1825 sz = VTD_PAGE_SIZE;
1826 }
1827
1828 entry.target_as = &vtd_dev_as->as;
1829 entry.addr_mask = sz - 1;
1830 entry.iova = addr;
1831 entry.perm = IOMMU_NONE;
1832 entry.translated_addr = 0;
1833 memory_region_notify_iommu(&vtd_dev_as->iommu, entry);
1834
1835done:
1836 return true;
1837}
1838
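/*
 * Process one invalidation descriptor at the current queue head.  Returns
 * false on any malformed or failing descriptor, which makes the caller
 * raise an Invalidation Queue Error (IQE).
 */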
1839static bool vtd_process_inv_desc(IntelIOMMUState *s)
1840{
1841 VTDInvDesc inv_desc;
1842 uint8_t desc_type;
1843
1844 trace_vtd_inv_qi_head(s->iq_head);
1845 if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
1846 s->iq_last_desc_type = VTD_INV_DESC_NONE;
1847 return false;
1848 }
1849 desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
1850
1851 s->iq_last_desc_type = desc_type;
1852
1853 switch (desc_type) {
1854 case VTD_INV_DESC_CC:
1855 trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
1856 if (!vtd_process_context_cache_desc(s, &inv_desc)) {
1857 return false;
1858 }
1859 break;
1860
1861 case VTD_INV_DESC_IOTLB:
1862 trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
1863 if (!vtd_process_iotlb_desc(s, &inv_desc)) {
1864 return false;
1865 }
1866 break;
1867
1868 case VTD_INV_DESC_WAIT:
1869 trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
1870 if (!vtd_process_wait_desc(s, &inv_desc)) {
1871 return false;
1872 }
1873 break;
1874
1875 case VTD_INV_DESC_IEC:
1876 trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
1877 if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
1878 return false;
1879 }
1880 break;
1881
1882 case VTD_INV_DESC_DEVICE:
1883 trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo);
1884 if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
1885 return false;
1886 }
1887 break;
1888
1889 default:
1890 trace_vtd_inv_desc_invalid(inv_desc.hi, inv_desc.lo);
1891 return false;
1892 }
1893 s->iq_head++;
1894 if (s->iq_head == s->iq_size) {
1895 s->iq_head = 0;
1896 }
1897 return true;
1898}
1899
1900
1901static void vtd_fetch_inv_desc(IntelIOMMUState *s)
1902{
1903 trace_vtd_inv_qi_fetch();
1904
1905 if (s->iq_tail >= s->iq_size) {
1906
1907 trace_vtd_err_qi_tail(s->iq_tail, s->iq_size);
1908 vtd_handle_inv_queue_error(s);
1909 return;
1910 }
1911 while (s->iq_head != s->iq_tail) {
1912 if (!vtd_process_inv_desc(s)) {
1913
1914 vtd_handle_inv_queue_error(s);
1915 break;
1916 }
1917
1918 vtd_set_quad_raw(s, DMAR_IQH_REG,
1919 (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
1920 VTD_IQH_QH_MASK);
1921 }
1922}
1923
1924
1925static void vtd_handle_iqt_write(IntelIOMMUState *s)
1926{
1927 uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);
1928
1929 s->iq_tail = VTD_IQT_QT(val);
1930 trace_vtd_inv_qi_tail(s->iq_tail);
1931
1932 if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
1933
1934 vtd_fetch_inv_desc(s);
1935 }
1936}
1937
1938static void vtd_handle_fsts_write(IntelIOMMUState *s)
1939{
1940 uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
1941 uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
1942 uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;
1943
1944 if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
1945 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
1946 trace_vtd_fsts_clear_ip();
1947 }
1948
1949
1950
1951}
1952
1953static void vtd_handle_fectl_write(IntelIOMMUState *s)
1954{
1955 uint32_t fectl_reg;
1956
1957
1958
1959
1960 fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
1961
1962 trace_vtd_reg_write_fectl(fectl_reg);
1963
1964 if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
1965 vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
1966 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
1967 }
1968}
1969
1970static void vtd_handle_ics_write(IntelIOMMUState *s)
1971{
1972 uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
1973 uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
1974
1975 if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
1976 trace_vtd_reg_ics_clear_ip();
1977 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
1978 }
1979}
1980
1981static void vtd_handle_iectl_write(IntelIOMMUState *s)
1982{
1983 uint32_t iectl_reg;
1984
1985
1986
1987
1988 iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
1989
1990 trace_vtd_reg_write_iectl(iectl_reg);
1991
1992 if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
1993 vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
1994 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
1995 }
1996}
1997
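/*
 * MMIO read handler for the DMAR register set.  Most registers are read
 * straight from the csr array with write-only bits masked out; the root
 * table and invalidation queue address registers are special-cased so the
 * guest reads back the values latched by the emulation.
 */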
1998static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
1999{
2000 IntelIOMMUState *s = opaque;
2001 uint64_t val;
2002
2003 trace_vtd_reg_read(addr, size);
2004
2005 if (addr + size > DMAR_REG_SIZE) {
2006 trace_vtd_err("Read MMIO over range.");
2007 return (uint64_t)-1;
2008 }
2009
2010 switch (addr) {
2011
2012 case DMAR_RTADDR_REG:
2013 if (size == 4) {
2014 val = s->root & ((1ULL << 32) - 1);
2015 } else {
2016 val = s->root;
2017 }
2018 break;
2019
2020 case DMAR_RTADDR_REG_HI:
2021 assert(size == 4);
2022 val = s->root >> 32;
2023 break;
2024
2025
2026 case DMAR_IQA_REG:
2027 val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
2028 if (size == 4) {
2029 val = val & ((1ULL << 32) - 1);
2030 }
2031 break;
2032
2033 case DMAR_IQA_REG_HI:
2034 assert(size == 4);
2035 val = s->iq >> 32;
2036 break;
2037
2038 default:
2039 if (size == 4) {
2040 val = vtd_get_long(s, addr);
2041 } else {
2042 val = vtd_get_quad(s, addr);
2043 }
2044 }
2045
2046 return val;
2047}
2048
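/*
 * MMIO write handler: store the value through the write masks, then run
 * the side effects of the individual registers (GCMD, CCMD, IOTLB, IQT,
 * fault and invalidation event control, ...).  For some 64-bit registers
 * written as two 32-bit halves (e.g. CCMD, IOTLB) the side effect runs
 * when the high half is written.
 */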
2049static void vtd_mem_write(void *opaque, hwaddr addr,
2050 uint64_t val, unsigned size)
2051{
2052 IntelIOMMUState *s = opaque;
2053
2054 trace_vtd_reg_write(addr, size, val);
2055
2056 if (addr + size > DMAR_REG_SIZE) {
2057 trace_vtd_err("Write MMIO over range.");
2058 return;
2059 }
2060
2061 switch (addr) {
2062
2063 case DMAR_GCMD_REG:
2064 vtd_set_long(s, addr, val);
2065 vtd_handle_gcmd_write(s);
2066 break;
2067
2068
2069 case DMAR_CCMD_REG:
2070 if (size == 4) {
2071 vtd_set_long(s, addr, val);
2072 } else {
2073 vtd_set_quad(s, addr, val);
2074 vtd_handle_ccmd_write(s);
2075 }
2076 break;
2077
2078 case DMAR_CCMD_REG_HI:
2079 assert(size == 4);
2080 vtd_set_long(s, addr, val);
2081 vtd_handle_ccmd_write(s);
2082 break;
2083
2084
2085 case DMAR_IOTLB_REG:
2086 if (size == 4) {
2087 vtd_set_long(s, addr, val);
2088 } else {
2089 vtd_set_quad(s, addr, val);
2090 vtd_handle_iotlb_write(s);
2091 }
2092 break;
2093
2094 case DMAR_IOTLB_REG_HI:
2095 assert(size == 4);
2096 vtd_set_long(s, addr, val);
2097 vtd_handle_iotlb_write(s);
2098 break;
2099
2100
2101 case DMAR_IVA_REG:
2102 if (size == 4) {
2103 vtd_set_long(s, addr, val);
2104 } else {
2105 vtd_set_quad(s, addr, val);
2106 }
2107 break;
2108
2109 case DMAR_IVA_REG_HI:
2110 assert(size == 4);
2111 vtd_set_long(s, addr, val);
2112 break;
2113
2114
2115 case DMAR_FSTS_REG:
2116 assert(size == 4);
2117 vtd_set_long(s, addr, val);
2118 vtd_handle_fsts_write(s);
2119 break;
2120
2121
2122 case DMAR_FECTL_REG:
2123 assert(size == 4);
2124 vtd_set_long(s, addr, val);
2125 vtd_handle_fectl_write(s);
2126 break;
2127
2128
2129 case DMAR_FEDATA_REG:
2130 assert(size == 4);
2131 vtd_set_long(s, addr, val);
2132 break;
2133
2134
2135 case DMAR_FEADDR_REG:
2136 assert(size == 4);
2137 vtd_set_long(s, addr, val);
2138 break;
2139
2140
2141 case DMAR_FEUADDR_REG:
2142 assert(size == 4);
2143 vtd_set_long(s, addr, val);
2144 break;
2145
2146
2147 case DMAR_PMEN_REG:
2148 assert(size == 4);
2149 vtd_set_long(s, addr, val);
2150 break;
2151
2152
2153 case DMAR_RTADDR_REG:
2154 if (size == 4) {
2155 vtd_set_long(s, addr, val);
2156 } else {
2157 vtd_set_quad(s, addr, val);
2158 }
2159 break;
2160
2161 case DMAR_RTADDR_REG_HI:
2162 assert(size == 4);
2163 vtd_set_long(s, addr, val);
2164 break;
2165
2166
2167 case DMAR_IQT_REG:
2168 if (size == 4) {
2169 vtd_set_long(s, addr, val);
2170 } else {
2171 vtd_set_quad(s, addr, val);
2172 }
2173 vtd_handle_iqt_write(s);
2174 break;
2175
2176 case DMAR_IQT_REG_HI:
2177 assert(size == 4);
2178 vtd_set_long(s, addr, val);
2179
2180 break;
2181
2182
2183 case DMAR_IQA_REG:
2184 if (size == 4) {
2185 vtd_set_long(s, addr, val);
2186 } else {
2187 vtd_set_quad(s, addr, val);
2188 }
2189 break;
2190
2191 case DMAR_IQA_REG_HI:
2192 assert(size == 4);
2193 vtd_set_long(s, addr, val);
2194 break;
2195
2196
2197 case DMAR_ICS_REG:
2198 assert(size == 4);
2199 vtd_set_long(s, addr, val);
2200 vtd_handle_ics_write(s);
2201 break;
2202
2203
2204 case DMAR_IECTL_REG:
2205 assert(size == 4);
2206 vtd_set_long(s, addr, val);
2207 vtd_handle_iectl_write(s);
2208 break;
2209
2210
2211 case DMAR_IEDATA_REG:
2212 assert(size == 4);
2213 vtd_set_long(s, addr, val);
2214 break;
2215
2216
2217 case DMAR_IEADDR_REG:
2218 assert(size == 4);
2219 vtd_set_long(s, addr, val);
2220 break;
2221
2222
2223 case DMAR_IEUADDR_REG:
2224 assert(size == 4);
2225 vtd_set_long(s, addr, val);
2226 break;
2227
2228
2229 case DMAR_FRCD_REG_0_0:
2230 if (size == 4) {
2231 vtd_set_long(s, addr, val);
2232 } else {
2233 vtd_set_quad(s, addr, val);
2234 }
2235 break;
2236
2237 case DMAR_FRCD_REG_0_1:
2238 assert(size == 4);
2239 vtd_set_long(s, addr, val);
2240 break;
2241
2242 case DMAR_FRCD_REG_0_2:
2243 if (size == 4) {
2244 vtd_set_long(s, addr, val);
2245 } else {
2246 vtd_set_quad(s, addr, val);
2247
2248 vtd_update_fsts_ppf(s);
2249 }
2250 break;
2251
2252 case DMAR_FRCD_REG_0_3:
2253 assert(size == 4);
2254 vtd_set_long(s, addr, val);
2255
2256 vtd_update_fsts_ppf(s);
2257 break;
2258
2259 case DMAR_IRTA_REG:
2260 if (size == 4) {
2261 vtd_set_long(s, addr, val);
2262 } else {
2263 vtd_set_quad(s, addr, val);
2264 }
2265 break;
2266
2267 case DMAR_IRTA_REG_HI:
2268 assert(size == 4);
2269 vtd_set_long(s, addr, val);
2270 break;
2271
2272 default:
2273 if (size == 4) {
2274 vtd_set_long(s, addr, val);
2275 } else {
2276 vtd_set_quad(s, addr, val);
2277 }
2278 }
2279}
2280
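/*
 * IOMMUMemoryRegion translate callback.  With DMAR disabled the IOVA is
 * returned identity-mapped with RW permission; otherwise the full
 * context-entry lookup and page walk in vtd_do_iommu_translate() is used.
 */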
2281static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
2282 IOMMUAccessFlags flag)
2283{
2284 VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
2285 IntelIOMMUState *s = vtd_as->iommu_state;
2286 IOMMUTLBEntry iotlb = {
2287
2288 .target_as = &address_space_memory,
2289 };
2290 bool success;
2291
2292 if (likely(s->dmar_enabled)) {
2293 success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
2294 addr, flag & IOMMU_WO, &iotlb);
2295 } else {
2296
2297 iotlb.iova = addr & VTD_PAGE_MASK_4K;
2298 iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
2299 iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
2300 iotlb.perm = IOMMU_RW;
2301 success = true;
2302 }
2303
2304 if (likely(success)) {
2305 trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus),
2306 VTD_PCI_SLOT(vtd_as->devfn),
2307 VTD_PCI_FUNC(vtd_as->devfn),
2308 iotlb.iova, iotlb.translated_addr,
2309 iotlb.addr_mask);
2310 } else {
2311 trace_vtd_err_dmar_translate(pci_bus_num(vtd_as->bus),
2312 VTD_PCI_SLOT(vtd_as->devfn),
2313 VTD_PCI_FUNC(vtd_as->devfn),
2314 iotlb.iova);
2315 }
2316
2317 return iotlb;
2318}
2319
2320static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
2321 IOMMUNotifierFlag old,
2322 IOMMUNotifierFlag new)
2323{
2324 VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
2325 IntelIOMMUState *s = vtd_as->iommu_state;
2326 IntelIOMMUNotifierNode *node = NULL;
2327 IntelIOMMUNotifierNode *next_node = NULL;
2328
    if (!s->caching_mode && (new & IOMMU_NOTIFIER_MAP)) {
        error_report("We need to set caching-mode=on for intel-iommu to enable "
                     "device assignment with IOMMU protection.");
2332 exit(1);
2333 }
2334
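    /* First notifier registered for this address space: start tracking it */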
2335 if (old == IOMMU_NOTIFIER_NONE) {
2336 node = g_malloc0(sizeof(*node));
2337 node->vtd_as = vtd_as;
2338 QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
2339 return;
2340 }
2341
    /* Update the notifier node tracking this address space */
2343 QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
2344 if (node->vtd_as == vtd_as) {
2345 if (new == IOMMU_NOTIFIER_NONE) {
2346 QLIST_REMOVE(node, next);
2347 g_free(node);
2348 }
2349 return;
2350 }
2351 }
2352}
2353
2354static int vtd_post_load(void *opaque, int version_id)
2355{
2356 IntelIOMMUState *iommu = opaque;
2357
    /*
     * Memory regions are dynamically switched on and off depending on the
     * context entries programmed by the guest.  After migration, make sure
     * every device's address space is still configured correctly.
     */
2363 vtd_switch_address_space_all(iommu);
2364
2365 return 0;
2366}
2367
2368static const VMStateDescription vtd_vmstate = {
2369 .name = "iommu-intel",
2370 .version_id = 1,
2371 .minimum_version_id = 1,
2372 .priority = MIG_PRI_IOMMU,
2373 .post_load = vtd_post_load,
2374 .fields = (VMStateField[]) {
2375 VMSTATE_UINT64(root, IntelIOMMUState),
2376 VMSTATE_UINT64(intr_root, IntelIOMMUState),
2377 VMSTATE_UINT64(iq, IntelIOMMUState),
2378 VMSTATE_UINT32(intr_size, IntelIOMMUState),
2379 VMSTATE_UINT16(iq_head, IntelIOMMUState),
2380 VMSTATE_UINT16(iq_tail, IntelIOMMUState),
2381 VMSTATE_UINT16(iq_size, IntelIOMMUState),
2382 VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
2383 VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
2384 VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
2385 VMSTATE_BOOL(root_extended, IntelIOMMUState),
2386 VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
2387 VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
2388 VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
2389 VMSTATE_BOOL(intr_eime, IntelIOMMUState),
2390 VMSTATE_END_OF_LIST()
2391 }
2392};
2393
2394static const MemoryRegionOps vtd_mem_ops = {
2395 .read = vtd_mem_read,
2396 .write = vtd_mem_write,
2397 .endianness = DEVICE_LITTLE_ENDIAN,
2398 .impl = {
2399 .min_access_size = 4,
2400 .max_access_size = 8,
2401 },
2402 .valid = {
2403 .min_access_size = 4,
2404 .max_access_size = 8,
2405 },
2406};
2407
2408static Property vtd_properties[] = {
2409 DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
2410 DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
2411 ON_OFF_AUTO_AUTO),
2412 DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
    DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, false),
2414 DEFINE_PROP_END_OF_LIST(),
2415};
2416
/*
 * Fetch the IRTE at @index from the interrupt remapping table and validate
 * it against the requester source-id @sid.
 */
2418static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
2419 VTD_IR_TableEntry *entry, uint16_t sid)
2420{
    static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = {
        0xffff, 0xfffb, 0xfff9, 0xfff8
    };
2423 dma_addr_t addr = 0x00;
2424 uint16_t mask, source_id;
2425 uint8_t bus, bus_max, bus_min;
2426
2427 addr = iommu->intr_root + index * sizeof(*entry);
2428 if (dma_memory_read(&address_space_memory, addr, entry,
2429 sizeof(*entry))) {
2430 trace_vtd_err("Memory read failed for IRTE.");
2431 return -VTD_FR_IR_ROOT_INVAL;
2432 }
2433
2434 trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]),
2435 le64_to_cpu(entry->data[0]));
2436
2437 if (!entry->irte.present) {
2438 trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
2439 le64_to_cpu(entry->data[0]));
2440 return -VTD_FR_IR_ENTRY_P;
2441 }
2442
2443 if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
2444 entry->irte.__reserved_2) {
2445 trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
2446 le64_to_cpu(entry->data[0]));
2447 return -VTD_FR_IR_IRTE_RSVD;
2448 }
2449
2450 if (sid != X86_IOMMU_SID_INVALID) {
        /* Validate the source-id against the IRTE's SVT/SQ fields */
2452 source_id = le32_to_cpu(entry->irte.source_id);
2453 switch (entry->irte.sid_vtype) {
2454 case VTD_SVT_NONE:
2455 break;
2456
2457 case VTD_SVT_ALL:
2458 mask = vtd_svt_mask[entry->irte.sid_q];
2459 if ((source_id & mask) != (sid & mask)) {
2460 trace_vtd_err_irte_sid(index, sid, source_id);
2461 return -VTD_FR_IR_SID_ERR;
2462 }
2463 break;
2464
2465 case VTD_SVT_BUS:
2466 bus_max = source_id >> 8;
2467 bus_min = source_id & 0xff;
2468 bus = sid >> 8;
2469 if (bus > bus_max || bus < bus_min) {
2470 trace_vtd_err_irte_sid_bus(index, bus, bus_min, bus_max);
2471 return -VTD_FR_IR_SID_ERR;
2472 }
2473 break;
2474
        default:
            trace_vtd_err_irte_svt(index, entry->irte.sid_vtype);
            return -VTD_FR_IR_SID_ERR;
2480 }
2481 }
2482
2483 return 0;
2484}
2485
/* Fetch the remapped IRQ information for interrupt remapping entry @index */
2487static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
2488 VTDIrq *irq, uint16_t sid)
2489{
2490 VTD_IR_TableEntry irte = {};
2491 int ret = 0;
2492
2493 ret = vtd_irte_get(iommu, index, &irte, sid);
2494 if (ret) {
2495 return ret;
2496 }
2497
2498 irq->trigger_mode = irte.irte.trigger_mode;
2499 irq->vector = irte.irte.vector;
2500 irq->delivery_mode = irte.irte.delivery_mode;
2501 irq->dest = le32_to_cpu(irte.irte.dest_id);
2502 if (!iommu->intr_eime) {
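        /*
         * Without EIM (xAPIC mode) only an 8-bit APIC destination ID is
         * available, carried in bits 15:8 of the IRTE destination field.
         */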
2503#define VTD_IR_APIC_DEST_MASK (0xff00ULL)
2504#define VTD_IR_APIC_DEST_SHIFT (8)
2505 irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
2506 VTD_IR_APIC_DEST_SHIFT;
2507 }
2508 irq->dest_mode = irte.irte.dest_mode;
2509 irq->redir_hint = irte.irte.redir_hint;
2510
2511 trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
2512 irq->delivery_mode, irq->dest, irq->dest_mode);
2513
2514 return 0;
2515}
2516
/* Generate one MSI message from the remapped VTDIrq information */
2518static void vtd_generate_msi_message(VTDIrq *irq, MSIMessage *msg_out)
2519{
2520 VTD_MSIMessage msg = {};
2521
    /* Address bits */
2523 msg.dest_mode = irq->dest_mode;
2524 msg.redir_hint = irq->redir_hint;
2525 msg.dest = irq->dest;
2526 msg.__addr_hi = irq->dest & 0xffffff00;
2527 msg.__addr_head = cpu_to_le32(0xfee);
    /* Keep the low bits from the original MSI address */
2529 msg.__not_used = irq->msi_addr_last_bits;
2530
    /* Data bits */
2532 msg.vector = irq->vector;
2533 msg.delivery_mode = irq->delivery_mode;
2534 msg.level = 1;
2535 msg.trigger_mode = irq->trigger_mode;
2536
2537 msg_out->address = msg.msi_addr;
2538 msg_out->data = msg.msi_data;
2539}
2540
/* Interrupt remapping for one MSI/MSI-X message */
2542static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
2543 MSIMessage *origin,
2544 MSIMessage *translated,
2545 uint16_t sid)
2546{
2547 int ret = 0;
2548 VTD_IR_MSIAddress addr;
2549 uint16_t index;
2550 VTDIrq irq = {};
2551
2552 assert(origin && translated);
2553
2554 trace_vtd_ir_remap_msi_req(origin->address, origin->data);
2555
2556 if (!iommu || !iommu->intr_enabled) {
2557 memcpy(translated, origin, sizeof(*origin));
2558 goto out;
2559 }
2560
2561 if (origin->address & VTD_MSI_ADDR_HI_MASK) {
2562 trace_vtd_err("MSI address high 32 bits non-zero when "
2563 "Interrupt Remapping enabled.");
2564 return -VTD_FR_IR_REQ_RSVD;
2565 }
2566
2567 addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
2568 if (addr.addr.__head != 0xfee) {
        trace_vtd_err("MSI address low 32 bits invalid.");
2570 return -VTD_FR_IR_REQ_RSVD;
2571 }
2572
    /* Compatibility format: pass the message through unchanged */
2574 if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
2575 memcpy(translated, origin, sizeof(*origin));
2576 goto out;
2577 }
2578
2579 index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);
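    /*
     * The 16-bit interrupt index is split across the address: index_l
     * carries bits 14:0 and index_h carries bit 15.
     */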
2580
2581#define VTD_IR_MSI_DATA_SUBHANDLE (0x0000ffff)
2582#define VTD_IR_MSI_DATA_RESERVED (0xffff0000)
2583
2584 if (addr.addr.sub_valid) {
        /* The subhandle from the MSI data extends the interrupt index */
2586 index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
2587 }
2588
2589 ret = vtd_remap_irq_get(iommu, index, &irq, sid);
2590 if (ret) {
2591 return ret;
2592 }
2593
2594 if (addr.addr.sub_valid) {
2595 trace_vtd_ir_remap_type("MSI");
2596 if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
2597 trace_vtd_err_ir_msi_invalid(sid, origin->address, origin->data);
2598 return -VTD_FR_IR_REQ_RSVD;
2599 }
2600 } else {
2601 uint8_t vector = origin->data & 0xff;
2602 uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
2603
2604 trace_vtd_ir_remap_type("IOAPIC");

        /* The RTE contents should match the IRTE; warn on mismatch */
2607 if (vector != irq.vector) {
2608 trace_vtd_warn_ir_vector(sid, index, vector, irq.vector);
2609 }
2610
2613 if (trigger_mode != irq.trigger_mode) {
2614 trace_vtd_warn_ir_trigger(sid, index, trigger_mode,
2615 irq.trigger_mode);
2616 }
2617 }
2618
    /*
     * Preserve the don't-care low bits of the original MSI address; the
     * guest may have stored something there, and keeping them is harmless.
     */
2623 irq.msi_addr_last_bits = addr.addr.__not_care;
2624
    /* Translate the VTDIrq into an MSI message */
2626 vtd_generate_msi_message(&irq, translated);
2627
2628out:
2629 trace_vtd_ir_remap_msi(origin->address, origin->data,
2630 translated->address, translated->data);
2631 return 0;
2632}
2633
2634static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
2635 MSIMessage *dst, uint16_t sid)
2636{
2637 return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
2638 src, dst, sid);
2639}
2640
2641static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
2642 uint64_t *data, unsigned size,
2643 MemTxAttrs attrs)
2644{
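    /* Reads from the interrupt remapping window are not supported */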
2645 return MEMTX_OK;
2646}
2647
2648static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
2649 uint64_t value, unsigned size,
2650 MemTxAttrs attrs)
2651{
2652 int ret = 0;
2653 MSIMessage from = {}, to = {};
2654 uint16_t sid = X86_IOMMU_SID_INVALID;
2655
2656 from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
2657 from.data = (uint32_t) value;
2658
2659 if (!attrs.unspecified) {
        /* The transaction carries an explicit source-id (requester ID) */
2661 sid = attrs.requester_id;
2662 }
2663
2664 ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
2665 if (ret) {
        /* Remapping failed: drop the interrupt */
2668 return MEMTX_ERROR;
2669 }
2670
2671 apic_get_class()->send_msi(&to);
2672
2673 return MEMTX_OK;
2674}
2675
2676static const MemoryRegionOps vtd_mem_ir_ops = {
2677 .read_with_attrs = vtd_mem_ir_read,
2678 .write_with_attrs = vtd_mem_ir_write,
2679 .endianness = DEVICE_LITTLE_ENDIAN,
2680 .impl = {
2681 .min_access_size = 4,
2682 .max_access_size = 4,
2683 },
2684 .valid = {
2685 .min_access_size = 4,
2686 .max_access_size = 4,
2687 },
2688};
2689
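/* Return the VTDAddressSpace for @devfn on @bus, creating it on first use */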
2690VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
2691{
2692 uintptr_t key = (uintptr_t)bus;
2693 VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
2694 VTDAddressSpace *vtd_dev_as;
2695 char name[128];
2696
2697 if (!vtd_bus) {
2698 uintptr_t *new_key = g_malloc(sizeof(*new_key));
2699 *new_key = (uintptr_t)bus;
        /* Lives for the lifetime of the machine: no corresponding free() */
2701 vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \
2702 X86_IOMMU_PCI_DEVFN_MAX);
2703 vtd_bus->bus = bus;
2704 g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
2705 }
2706
2707 vtd_dev_as = vtd_bus->dev_as[devfn];
2708
2709 if (!vtd_dev_as) {
2710 snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
2711 vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));
2712
2713 vtd_dev_as->bus = bus;
2714 vtd_dev_as->devfn = (uint8_t)devfn;
2715 vtd_dev_as->iommu_state = s;
2716 vtd_dev_as->context_cache_entry.context_cache_gen = 0;
        /*
         * The per-device root memory region is a container with three
         * overlapping subregions:
         *
         *  - sys_alias: an alias of system memory, used while DMA
         *    remapping is disabled for this device (pass-through);
         *  - iommu ("intel_iommu_dmar"): the IOMMU translation region,
         *    used while DMA remapping is enabled;
         *  - iommu_ir ("intel_iommu_ir"): the interrupt remapping window
         *    at VTD_INTERRUPT_ADDR_FIRST, overlaid on both with a higher
         *    priority.
         *
         * vtd_switch_address_space() picks between the first two
         * depending on the current DMAR and context configuration.
         */
2735 memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu),
2736 TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s),
2737 "intel_iommu_dmar",
2738 UINT64_MAX);
2739 memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
2740 "vtd_sys_alias", get_system_memory(),
2741 0, memory_region_size(get_system_memory()));
2742 memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
2743 &vtd_mem_ir_ops, s, "intel_iommu_ir",
2744 VTD_INTERRUPT_ADDR_SIZE);
2745 memory_region_init(&vtd_dev_as->root, OBJECT(s),
2746 "vtd_root", UINT64_MAX);
2747 memory_region_add_subregion_overlap(&vtd_dev_as->root,
2748 VTD_INTERRUPT_ADDR_FIRST,
2749 &vtd_dev_as->iommu_ir, 64);
2750 address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
2751 memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
2752 &vtd_dev_as->sys_alias, 1);
2753 memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
2754 MEMORY_REGION(&vtd_dev_as->iommu),
2755 1);
2756 vtd_switch_address_space(vtd_dev_as);
2757 }
2758 return vtd_dev_as;
2759}
2760
/* Unmap the whole IOVA range covered by notifier @n on address space @as */
2762static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
2763{
2764 IOMMUTLBEntry entry;
2765 hwaddr size;
2766 hwaddr start = n->start;
2767 hwaddr end = n->end;
2768

    /*
     * Note: the code below assumes that IOVAs never exceed VTD_MGAW bits
     * (guaranteed by the VT-d spec), so 64-bit overflow does not need to
     * be handled here.
     */
2775 if (end > VTD_ADDRESS_SIZE) {
        /*
         * No need to unmap anything beyond the address width that the
         * emulated VT-d hardware supports.
         */
2780 end = VTD_ADDRESS_SIZE;
2781 }
2782
2783 assert(start <= end);
2784 size = end - start;
2785
    if (ctpop64(size) != 1) {
        /*
         * The range is not a power of two, so it cannot be expressed as
         * an addr_mask; round it up to the next power of two.
         */
        int bits = 64 - clz64(size);
        if (bits > VTD_MGAW) {
            /* Should not happen, but cap it at the supported width */
            bits = VTD_MGAW;
        }
        size = 1ULL << bits;
    }
2798
2799 entry.target_as = &address_space_memory;
    /* Align the start address down to the (power-of-two) size */
2801 entry.iova = n->start & ~(size - 1);
    /* translated_addr is meaningless for an unmap notification */
2803 entry.translated_addr = 0;
2804 entry.perm = IOMMU_NONE;
2805 entry.addr_mask = size - 1;
2806
2807 trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
2808 VTD_PCI_SLOT(as->devfn),
2809 VTD_PCI_FUNC(as->devfn),
2810 entry.iova, size);
2811
2812 memory_region_notify_one(n, &entry);
2813}
2814
2815static void vtd_address_space_unmap_all(IntelIOMMUState *s)
2816{
2817 IntelIOMMUNotifierNode *node;
2818 VTDAddressSpace *vtd_as;
2819 IOMMUNotifier *n;
2820
2821 QLIST_FOREACH(node, &s->notifiers_list, next) {
2822 vtd_as = node->vtd_as;
2823 IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
2824 vtd_address_space_unmap(vtd_as, n);
2825 }
2826 }
2827}
2828
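/* Per-mapping hook passed to vtd_page_walk() when replaying a region */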
2829static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
2830{
2831 memory_region_notify_one((IOMMUNotifier *)private, entry);
2832 return 0;
2833}
2834
2835static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
2836{
2837 VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu);
2838 IntelIOMMUState *s = vtd_as->iommu_state;
2839 uint8_t bus_n = pci_bus_num(vtd_as->bus);
2840 VTDContextEntry ce;
2841
    /*
     * A replay can be triggered either by an invalidation or by a newly
     * created context entry.  Either way, release all existing mappings
     * first; this can be slow, but it is the simplest way to avoid
     * duplicate notifications.
     */
2847 vtd_address_space_unmap(vtd_as, n);
2848
2849 if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
2850 trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
2851 PCI_FUNC(vtd_as->devfn),
2852 VTD_CONTEXT_ENTRY_DID(ce.hi),
2853 ce.hi, ce.lo);
2854 vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false);
2855 } else {
2856 trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
2857 PCI_FUNC(vtd_as->devfn));
2858 }
2861}
2862
2863
/*
 * Do the initialization.  It is also called on device reset, so be careful
 * when adding new initialization here.
 */
2866static void vtd_init(IntelIOMMUState *s)
2867{
2868 X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
2869
2870 memset(s->csr, 0, DMAR_REG_SIZE);
2871 memset(s->wmask, 0, DMAR_REG_SIZE);
2872 memset(s->w1cmask, 0, DMAR_REG_SIZE);
2873 memset(s->womask, 0, DMAR_REG_SIZE);
2874
2875 s->root = 0;
2876 s->root_extended = false;
2877 s->dmar_enabled = false;
2878 s->iq_head = 0;
2879 s->iq_tail = 0;
2880 s->iq = 0;
2881 s->iq_size = 0;
2882 s->qi_enabled = false;
2883 s->iq_last_desc_type = VTD_INV_DESC_NONE;
2884 s->next_frcd_reg = 0;
2885 s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
2886 VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS;
2887 s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;
2888
2889 if (x86_iommu->intr_supported) {
2890 s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
2891 if (s->intr_eim == ON_OFF_AUTO_ON) {
2892 s->ecap |= VTD_ECAP_EIM;
2893 }
2894 assert(s->intr_eim != ON_OFF_AUTO_AUTO);
2895 }
2896
2897 if (x86_iommu->dt_supported) {
2898 s->ecap |= VTD_ECAP_DT;
2899 }
2900
2901 if (x86_iommu->pt_supported) {
2902 s->ecap |= VTD_ECAP_PT;
2903 }
2904
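    /*
     * Caching Mode: the guest must issue invalidations even for
     * previously not-present entries, which lets us observe every
     * mapping change.
     */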
2905 if (s->caching_mode) {
2906 s->cap |= VTD_CAP_CM;
2907 }
2908
2909 vtd_reset_context_cache(s);
2910 vtd_reset_iotlb(s);
2911
    /* Define registers with default values and bit semantics */
2913 vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
2914 vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
2915 vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
2916 vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
2917 vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
2918 vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
2919 vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
2920 vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
2921 vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);
2922
    /* Fault status and fault event registers */
2924 vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
2925 vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
2926 vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
2927 vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);
2928
    /*
     * Fault event upper address register: no writable bits are defined,
     * so it reads as zero.
     */
2932 vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);
2933
    /*
     * Protected Memory Enable register: PLMR and PHMR are not advertised
     * in CAP_REG, so the register is defined as read-only zero.
     */
2938 vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);
2939
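    /* Invalidation queue and invalidation event registers */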
2940 vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
2941 vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
2942 vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
2943 vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
2944 vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
2945 vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
2946 vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Invalidation event upper address register: read-only zero */
2948 vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);
2949
    /* IOTLB invalidation registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
2952 vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
2953 vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);
2954
    /* Fault Recording Registers, 128-bit */
2956 vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
2957 vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);
2958
    /* Interrupt remapping registers */
2962 vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
2963}
2964
2965
/*
 * Note: the per-device address spaces are not torn down here; devices keep
 * using the AddressSpace they were handed out at realize time.
 */
2968static void vtd_reset(DeviceState *dev)
2969{
2970 IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
2971
2972 vtd_init(s);
2973
    /*
     * On reset, throw away all mappings previously reported to the
     * notifiers; they will be rebuilt by replay if still needed.
     */
2977 vtd_address_space_unmap_all(s);
2978}
2979
2980static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
2981{
2982 IntelIOMMUState *s = opaque;
2983 VTDAddressSpace *vtd_as;
2984
2985 assert(0 <= devfn && devfn < X86_IOMMU_PCI_DEVFN_MAX);
2986
2987 vtd_as = vtd_find_add_as(s, bus, devfn);
2988 return &vtd_as->as;
2989}
2990
2991static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
2992{
2993 X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
2994
    /* Interrupt remapping only works with kernel-irqchip=off or =split */
2996 if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() &&
2997 !kvm_irqchip_is_split()) {
2998 error_setg(errp, "Intel Interrupt Remapping cannot work with "
2999 "kernel-irqchip=on, please use 'split|off'.");
3000 return false;
3001 }
3002 if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu->intr_supported) {
3003 error_setg(errp, "eim=on cannot be selected without intremap=on");
3004 return false;
3005 }
3006
3007 if (s->intr_eim == ON_OFF_AUTO_AUTO) {
3008 s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
3009 && x86_iommu->intr_supported ?
3010 ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
3011 }
3012 if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
3013 if (!kvm_irqchip_in_kernel()) {
3014 error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
3015 return false;
3016 }
3017 if (!kvm_enable_x2apic()) {
            error_setg(errp, "eim=on requires support on the KVM side "
                             "(X2APIC_API, first shipped in v4.7)");
3020 return false;
3021 }
3022 }
3023
3024 return true;
3025}
3026
3027static void vtd_realize(DeviceState *dev, Error **errp)
3028{
3029 MachineState *ms = MACHINE(qdev_get_machine());
3030 MachineClass *mc = MACHINE_GET_CLASS(ms);
3031 PCMachineState *pcms =
3032 PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE));
3033 PCIBus *bus;
3034 IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
3035 X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
3036
3037 if (!pcms) {
3038 error_setg(errp, "Machine-type '%s' not supported by intel-iommu",
3039 mc->name);
3040 return;
3041 }
3042
3043 bus = pcms->bus;
3044 x86_iommu->type = TYPE_INTEL;
3045
3046 if (!vtd_decide_config(s, errp)) {
3047 return;
3048 }
3049
3050 QLIST_INIT(&s->notifiers_list);
3051 memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
3052 memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
3053 "intel_iommu", DMAR_REG_SIZE);
3054 sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
3055
3056 s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
3057 g_free, g_free);
    s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash,
                                                vtd_uint64_equal,
                                                g_free, g_free);
3060 vtd_init(s);
3061 sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
3062 pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
    /* Pseudo address space under the root PCI bus for IOAPIC interrupts */
3064 pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
3065}
3066
3067static void vtd_class_init(ObjectClass *klass, void *data)
3068{
3069 DeviceClass *dc = DEVICE_CLASS(klass);
3070 X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);
3071
3072 dc->reset = vtd_reset;
3073 dc->vmsd = &vtd_vmstate;
3074 dc->props = vtd_properties;
3075 dc->hotpluggable = false;
3076 x86_class->realize = vtd_realize;
3077 x86_class->int_remap = vtd_int_remap;
    /* Supported by the pc-q35-* machine types */
3079 dc->user_creatable = true;
3080}
3081
3082static const TypeInfo vtd_info = {
3083 .name = TYPE_INTEL_IOMMU_DEVICE,
3084 .parent = TYPE_X86_IOMMU_DEVICE,
3085 .instance_size = sizeof(IntelIOMMUState),
3086 .class_init = vtd_class_init,
3087};
3088
3089static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
3090 void *data)
3091{
3092 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
3093
3094 imrc->translate = vtd_iommu_translate;
3095 imrc->notify_flag_changed = vtd_iommu_notify_flag_changed;
3096 imrc->replay = vtd_iommu_replay;
3097}
3098
3099static const TypeInfo vtd_iommu_memory_region_info = {
3100 .parent = TYPE_IOMMU_MEMORY_REGION,
3101 .name = TYPE_INTEL_IOMMU_MEMORY_REGION,
3102 .class_init = vtd_iommu_memory_region_class_init,
3103};
3104
3105static void vtd_register_types(void)
3106{
3107 type_register_static(&vtd_info);
3108 type_register_static(&vtd_iommu_memory_region_info);
3109}
3110
3111type_init(vtd_register_types)
3112