#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;

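/*
 * Request an underflow maintenance interrupt, asserted while no more
 * than one List Register holds a valid interrupt, so that the LRs can
 * be refilled from the software model.
 */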
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

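/*
 * An LR signals an EOI maintenance interrupt when its state has gone
 * back to invalid (the guest EOI'd the interrupt), the EOI bit was set
 * when the LR was populated, and the interrupt is purely virtual
 * (no HW bit, so deactivation is handled in software).
 */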
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}

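/*
 * Fold the state of the LRs back into the software model after running
 * the guest: sync the active/pending bits into each struct vgic_irq,
 * notify any registered ack notifiers for EOI'd level-triggered SPIs,
 * and clear the underflow request.
 */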
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < cpuif->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)
			continue;

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/*
		 * Level-triggered mapped IRQs are special because we only
		 * observe rising edges as input to the VGIC.
		 *
		 * If the guest never acked the interrupt we have to sample
		 * the physical line and set the line level, because the
		 * device state could have changed or we simply need to
		 * process the still pending interrupt later.
		 *
		 * It's not safe to mark the interrupt pending here; the
		 * resampling needs to happen in the GIC emulation code
		 * instead.
		 *
		 * If this causes us to lower the line, we have to also
		 * clear the physical active state, since we will otherwise
		 * never be told when the interrupt becomes asserted again.
		 */
		if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
			irq->line_level = vgic_get_phys_line_level(irq);

			if (!irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}

/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

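/*
 * Pack the generic struct vgic_vmcr representation into the shadow
 * ICH_VMCR_EL2 value that gets written to the hardware on vcpu load.
 */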
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating a GICv3, VFIQEn is RES1 and VAckCtl is
		 * RES0, so only the FIQ enable bit is set here.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating a GICv3, VFIQEn is RES1 and VAckCtl is
		 * RES0, so report the architecturally required values.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

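/* Set up the per-vcpu GICv3 CPU interface state when the vgic is enabled. */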
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their nominal (reset) values. Anything else resets
	 * to zero anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Enable the CPU interface, plus any requested trap bits */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
}

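/*
 * Sync the pending state of an LPI from the guest's pending table:
 * read the bit for this INTID, fold it into pending_latch, and clear
 * the bit in guest memory once it has been consumed.
 */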
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The deactivation of the doorbell interrupt will trigger the
 * unmapping of the associated vPE.
 */
static void unmap_all_vpes(struct vgic_dist *dist)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	}
}

static void map_all_vpes(struct vgic_dist *dist)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
		irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
	}
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 *
 * Walk the LPI list and write the pending state of each LPI (including
 * the VLPI state when GICv4.1 is available) back into the guest's
 * pending tables.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	bool vlpi_avail = false;
	int ret = 0;
	u8 val;

	if (unlikely(!vgic_initialized(kvm)))
		return -ENXIO;

	/*
	 * A preparation for getting any VLPI states.
	 * The above vgic initialized check also ensures that the allocation
	 * and enabling of the doorbells have already been done.
	 */
	if (kvm_vgic_global_state.has_gicv4_1) {
		unmap_all_vpes(dist);
		vlpi_avail = true;
	}

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool is_pending;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				goto out;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);

		is_pending = irq->pending_latch;

		if (irq->hw && vlpi_avail)
			vgic_v4_get_vlpi_state(irq, &is_pending);

		if (stored == is_pending)
			continue;

		if (is_pending)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			goto out;
	}

out:
	if (vlpi_avail)
		map_all_vpes(dist);

	return ret;
}

/**
 * vgic_v3_rdist_overlap - check if a region overlaps a redistributor region
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of the region
 *
 * Return: true if the region overlaps any registered redistributor region
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
		    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
			rdreg->base)
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}

/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to map a new set of redistributors
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps a fixed number of redistributors; regions are
 * filled in index order.
 *
 * Return: the redist region handle, if any, that has space for another rdist.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}

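/* Look up the redistributor region with the given index, if any. */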
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}

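/*
 * Final checks before the guest can use the vgic: the distributor and
 * all redistributor base addresses must be set and must not overlap,
 * after which the distributor MMIO device is registered.
 */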
int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	int c;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %d redistributor base not set\n", c);
			return -ENXIO;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm))
		return -EBUSY;

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		return ret;
	}

	if (kvm_vgic_global_state.has_gicv4_1)
		vgic_v4_configure_vsgis(kvm);

	return 0;
}

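/*
 * Static key controlling GICv3 system register trapping, enabled either
 * because of an erratum workaround or because one of the command-line
 * options below was set.
 */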
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv3 has been probed successfully, returns an error
 * code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
	bool has_v2;
	int ret;

	has_v2 = ich_vtr_el2 >> 63;
	ich_vtr_el2 = (u32)ich_vtr_el2;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
		kvm_info("GICv4%s support %sabled\n",
			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
			 gicv4_enable ? "en" : "dis");
	}

	kvm_vgic_global_state.vcpu_base = 0;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
	} else if (!has_v2) {
		pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C" : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}

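/* Restore the GIC CPU interface state when the vcpu is scheduled in. */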
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_activate_traps(cpu_if);

	WARN_ON(vgic_v4_load(vcpu));
}

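/*
 * Read the in-hardware VMCR back into the shadow copy. Only meaningful
 * when the guest uses the system register interface (SRE set); otherwise
 * the save/restore happens in the world switch.
 */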
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}

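/* Save the GIC CPU interface state when the vcpu is scheduled out. */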
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	WARN_ON(vgic_v4_put(vcpu, false));

	vgic_v3_vmcr_sync(vcpu);

	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_deactivate_traps(cpu_if);
}