#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

        cpuif->vgic_hcr |= ICH_HCR_UIE;
}

static bool lr_signals_eoi_mi(u64 lr_val)
{
        return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
               !(lr_val & ICH_LR_HW);
}

void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        int lr;

        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

        cpuif->vgic_hcr &= ~ICH_HCR_UIE;

        for (lr = 0; lr < cpuif->used_lrs; lr++) {
                u64 val = cpuif->vgic_lr[lr];
                u32 intid, cpuid;
                struct vgic_irq *irq;
                bool is_v2_sgi = false;
                bool deactivated;

                cpuid = val & GICH_LR_PHYSID_CPUID;
                cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

                if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                        intid = val & ICH_LR_VIRTUAL_ID_MASK;
                } else {
                        intid = val & GICH_LR_VIRTUALID;
                        is_v2_sgi = vgic_irq_is_sgi(intid);
                }

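                /* Notify fds when the guest EOI'ed a level-triggered IRQ */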
                if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
                        kvm_notify_acked_irq(vcpu->kvm, 0,
                                             intid - VGIC_NR_PRIVATE_IRQS);

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
                if (!irq)
                        continue;

                raw_spin_lock(&irq->irq_lock);

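                /* Always preserve the active bit, note deactivation */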
                deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
                irq->active = !!(val & ICH_LR_ACTIVE_BIT);

                if (irq->active && is_v2_sgi)
                        irq->active_source = cpuid;

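                /* Edge is the only case where we preserve the pending bit */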
                if (irq->config == VGIC_CONFIG_EDGE &&
                    (val & ICH_LR_PENDING_BIT)) {
                        irq->pending_latch = true;

                        if (is_v2_sgi)
                                irq->source |= (1 << cpuid);
                }

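                /*
                 * Clear soft pending state when level irqs have been acked.
                 */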
                if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
                        irq->pending_latch = false;

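                /* Handle resampling for mapped interrupts if required */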
                vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);

                raw_spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }

        cpuif->used_lrs = 0;
}

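/* Requires the irq to be locked already */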
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u64 val = irq->intid;
        bool allow_pending = true, is_v2_sgi;

        is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
                     model == KVM_DEV_TYPE_ARM_VGIC_V2);

        if (irq->active) {
                val |= ICH_LR_ACTIVE_BIT;
                if (is_v2_sgi)
                        val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
                if (vgic_irq_is_multi_sgi(irq)) {
                        allow_pending = false;
                        val |= ICH_LR_EOI;
                }
        }

        if (irq->hw && !vgic_irq_needs_resampling(irq)) {
                val |= ICH_LR_HW;
                val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
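                /*
                 * Never set pending+active on a HW interrupt, as the
                 * pending state is kept at the physical distributor
                 * level.
                 */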
                if (irq->active)
                        allow_pending = false;
        } else {
                if (irq->config == VGIC_CONFIG_LEVEL) {
                        val |= ICH_LR_EOI;

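                        /*
                         * Don't stack a pending state on top of an
                         * active one: the EOI maintenance interrupt
                         * only fires once the LR goes invalid, so
                         * software resampling would be lost if both
                         * states were advertised at once.
                         */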
                        if (irq->active)
                                allow_pending = false;
                }
        }

        if (allow_pending && irq_is_pending(irq)) {
                val |= ICH_LR_PENDING_BIT;

                if (irq->config == VGIC_CONFIG_EDGE)
                        irq->pending_latch = false;

                if (vgic_irq_is_sgi(irq->intid) &&
                    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                        u32 src = ffs(irq->source);

                        if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
                                           irq->intid))
                                return;

                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));
                        if (irq->source) {
                                irq->pending_latch = true;
                                val |= ICH_LR_EOI;
                        }
                }
        }

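        /*
         * Level-triggered mapped IRQs are special because we only
         * observe rising edges as input to the VGIC.  We therefore
         * lower the line level here, so that we can take new virtual
         * IRQs.
         */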
        if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
                irq->line_level = false;

        if (irq->group)
                val |= ICH_LR_GROUP;

        val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

        vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u32 vmcr;

        if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
                        ICH_VMCR_ACK_CTL_MASK;
                vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
                         ICH_VMCR_FIQ_EN_MASK;
        } else {
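                /*
                 * When emulating GICv3 on GICv3 with SRE==1,
                 * VFIQEn is RES1 and VAckCtl is RES0.
                 */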
                vmcr = ICH_VMCR_FIQ_EN_MASK;
        }

        vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
        vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
        vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
        vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
        vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
        vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
        vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

        cpu_if->vgic_vmcr = vmcr;
}

void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u32 vmcr;

        vmcr = cpu_if->vgic_vmcr;

        if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
                                ICH_VMCR_ACK_CTL_SHIFT;
                vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
                               ICH_VMCR_FIQ_EN_SHIFT;
        } else {
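                /*
                 * When emulating GICv3 on GICv3 with SRE==1,
                 * VFIQEn is RES1 and VAckCtl is RES0.
                 */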
                vmcrp->fiqen = 1;
                vmcrp->ackctl = 0;
        }

        vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
        vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
        vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
        vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
        vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
        vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
        vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

#define INITIAL_PENDBASER_VALUE                                             \
        (GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)              | \
         GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)       | \
         GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

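        /*
         * By forcing VMCR to zero, the GIC will restore the binary
         * points to their reset values. Anything else resets to zero
         * anyway.
         */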
        vgic_v3->vgic_vmcr = 0;

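        /*
         * If we are emulating a GICv3, we do it in a non-GICv2-compatible
         * way, so we force SRE to 1 to show the guest that we do not
         * support any form of IRQ/FIQ bypass.
         * This goes with the spec allowing the value to be RAO/WI.
         */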
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
                                     ICC_SRE_EL1_DFB |
                                     ICC_SRE_EL1_SRE);
                vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
        } else {
                vgic_v3->vgic_sre = 0;
        }

        vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
                                           ICH_VTR_ID_BITS_MASK) >>
                                           ICH_VTR_ID_BITS_SHIFT;
        vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
                                            ICH_VTR_PRI_BITS_MASK) >>
                                            ICH_VTR_PRI_BITS_SHIFT) + 1;

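        /* Enable the CPU interface, with any requested trapping on top */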
        vgic_v3->vgic_hcr = ICH_HCR_EN;
        if (group0_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
        if (group1_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
        if (common_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TC;
}

int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
        struct kvm_vcpu *vcpu;
        int byte_offset, bit_nr;
        gpa_t pendbase, ptr;
        bool status;
        u8 val;
        int ret;
        unsigned long flags;

retry:
        vcpu = irq->target_vcpu;
        if (!vcpu)
                return 0;

        pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

        byte_offset = irq->intid / BITS_PER_BYTE;
        bit_nr = irq->intid % BITS_PER_BYTE;
        ptr = pendbase + byte_offset;

        ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
        if (ret)
                return ret;

        status = val & (1 << bit_nr);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->target_vcpu != vcpu) {
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                goto retry;
        }
        irq->pending_latch = status;
        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

        if (status) {
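                /* clear consumed data */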
                val &= ~(1 << bit_nr);
                ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                if (ret)
                        return ret;
        }
        return 0;
}

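/*
 * The deactivation of the doorbell interrupt will trigger the
 * unmapping of the associated vPE.
 */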
static void unmap_all_vpes(struct vgic_dist *dist)
{
        struct irq_desc *desc;
        int i;

        for (i = 0; i < dist->its_vm.nr_vpes; i++) {
                desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
                irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
        }
}

static void map_all_vpes(struct vgic_dist *dist)
{
        struct irq_desc *desc;
        int i;

        for (i = 0; i < dist->its_vm.nr_vpes; i++) {
                desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
                irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
        }
}

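/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * kvm lock and all vcpu lock must be held
 */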
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq;
        gpa_t last_ptr = ~(gpa_t)0;
        bool vlpi_avail = false;
        int ret = 0;
        u8 val;

        if (unlikely(!vgic_initialized(kvm)))
                return -ENXIO;

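        /*
         * A preparation for getting any VLPI states.
         * The above vgic initialized check also ensures that the allocation
         * and enabling of the doorbells have already been done.
         */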
        if (kvm_vgic_global_state.has_gicv4_1) {
                unmap_all_vpes(dist);
                vlpi_avail = true;
        }

        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                int byte_offset, bit_nr;
                struct kvm_vcpu *vcpu;
                gpa_t pendbase, ptr;
                bool is_pending;
                bool stored;

                vcpu = irq->target_vcpu;
                if (!vcpu)
                        continue;

                pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

                byte_offset = irq->intid / BITS_PER_BYTE;
                bit_nr = irq->intid % BITS_PER_BYTE;
                ptr = pendbase + byte_offset;

                if (ptr != last_ptr) {
                        ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
                        if (ret)
                                goto out;
                        last_ptr = ptr;
                }

                stored = val & (1U << bit_nr);

                is_pending = irq->pending_latch;

                if (irq->hw && vlpi_avail)
                        vgic_v4_get_vlpi_state(irq, &is_pending);

                if (stored == is_pending)
                        continue;

                if (is_pending)
                        val |= 1 << bit_nr;
                else
                        val &= ~(1 << bit_nr);

                ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                if (ret)
                        goto out;
        }

out:
        if (vlpi_avail)
                map_all_vpes(dist);

        return ret;
}

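/**
 * vgic_v3_rdist_overlap - check if a region overlaps a registered
 * redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of the region
 *
 * Return: true if there is an overlap
 */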
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
        struct vgic_dist *d = &kvm->arch.vgic;
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, &d->rd_regions, list) {
                if ((base + size > rdreg->base) &&
                    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
                        return true;
        }
        return false;
}

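/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */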
bool vgic_v3_check_base(struct kvm *kvm)
{
        struct vgic_dist *d = &kvm->arch.vgic;
        struct vgic_redist_region *rdreg;

        if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
            d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
                return false;

        list_for_each_entry(rdreg, &d->rd_regions, list) {
                if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
                    rdreg->base)
                        return false;
        }

        if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
                return true;

        return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
                                      KVM_VGIC_V3_DIST_SIZE);
}

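/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * Stride between redistributors is 0 and regions are filled in the index
 * order.
 *
 * Return: the redist region handle, if any, that has space to map a new
 * rdist region.
 */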
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, rd_regions, list) {
                if (!vgic_v3_redist_region_full(rdreg))
                        return rdreg;
        }
        return NULL;
}

struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
                                                           u32 index)
{
        struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, rd_regions, list) {
                if (rdreg->index == index)
                        return rdreg;
        }
        return NULL;
}

int vgic_v3_map_resources(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int ret = 0;
        int c;

        kvm_for_each_vcpu(c, vcpu, kvm) {
                struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

                if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
                        kvm_debug("vcpu %d redistributor base not set\n", c);
                        return -ENXIO;
                }
        }

        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
                kvm_err("Need to set vgic distributor addresses first\n");
                return -ENXIO;
        }

        if (!vgic_v3_check_base(kvm)) {
                kvm_err("VGIC redist and dist frames overlap\n");
                return -EINVAL;
        }

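        /*
         * For a VGICv3 we require the userland to explicitly initialize
         * the VGIC before we need to use it.
         */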
        if (!vgic_initialized(kvm))
                return -EBUSY;

        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
        if (ret) {
                kvm_err("Unable to register VGICv3 dist MMIO regions\n");
                return ret;
        }

        if (kvm_vgic_global_state.has_gicv4_1)
                vgic_v4_configure_vsgis(kvm);

        return 0;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

static int __init early_group0_trap_cfg(char *buf)
{
        return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
        return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
        return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
        return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

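/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv3 has been probed successfully, returns an error
 * code otherwise
 */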
int vgic_v3_probe(const struct gic_kvm_info *info)
{
        u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
        bool has_v2;
        int ret;

        has_v2 = ich_vtr_el2 >> 63;
        ich_vtr_el2 = (u32)ich_vtr_el2;

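        /*
         * The ListRegs field is 5 bits, but there is an architectural
         * maximum of 16 list registers. Just ignore bit 4...
         */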
        kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
        kvm_vgic_global_state.can_emulate_gicv2 = false;
        kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

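        /* GICv4 support? */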
        if (info->has_v4) {
                kvm_vgic_global_state.has_gicv4 = gicv4_enable;
                kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
                kvm_info("GICv4%s support %sabled\n",
                         kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
                         gicv4_enable ? "en" : "dis");
        }

        kvm_vgic_global_state.vcpu_base = 0;

        if (!info->vcpu.start) {
                kvm_info("GICv3: no GICV resource entry\n");
        } else if (!has_v2) {
                pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
        } else if (!PAGE_ALIGNED(info->vcpu.start)) {
                pr_warn("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)info->vcpu.start);
        } else {
                kvm_vgic_global_state.vcpu_base = info->vcpu.start;
                kvm_vgic_global_state.can_emulate_gicv2 = true;
                ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
                if (ret) {
                        kvm_err("Cannot register GICv2 KVM device.\n");
                        return ret;
                }
                kvm_info("vgic-v2@%llx\n", info->vcpu.start);
        }

        ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
        if (ret) {
                kvm_err("Cannot register GICv3 KVM device.\n");
                kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
                return ret;
        }

        if (kvm_vgic_global_state.vcpu_base == 0)
                kvm_info("disabling GICv2 emulation\n");

        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
                group0_trap = true;
                group1_trap = true;
        }

        if (group0_trap || group1_trap || common_trap) {
                kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
                         group0_trap ? "G0" : "",
                         group1_trap ? "G1" : "",
                         common_trap ? "C" : "");
                static_branch_enable(&vgic_v3_cpuif_trap);
        }

        kvm_vgic_global_state.vctrl_base = NULL;
        kvm_vgic_global_state.type = VGIC_V3;
        kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

        return 0;
}

void vgic_v3_load(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

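        /*
         * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
         * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
         * VMCR_EL2 save/restore in the world switch.
         */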
        if (likely(cpu_if->vgic_sre))
                kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

        kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);

        if (has_vhe())
                __vgic_v3_activate_traps(cpu_if);

        WARN_ON(vgic_v4_load(vcpu));
}

void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

        if (likely(cpu_if->vgic_sre))
                cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}

void vgic_v3_put(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

        WARN_ON(vgic_v4_put(vcpu, false));

        vgic_v3_vmcr_sync(vcpu);

        kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);

        if (has_vhe())
                __vgic_v3_deactivate_traps(cpu_if);
}