1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/uaccess.h>
18#include <linux/interrupt.h>
19#include <linux/cpu.h>
20#include <linux/kvm_host.h>
21#include <kvm/arm_vgic.h>
22#include <asm/kvm_emulate.h>
23#include <asm/kvm_mmu.h>
24#include "vgic.h"
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63void kvm_vgic_early_init(struct kvm *kvm)
64{
65 struct vgic_dist *dist = &kvm->arch.vgic;
66
67 INIT_LIST_HEAD(&dist->lpi_list_head);
68 INIT_LIST_HEAD(&dist->lpi_translation_cache);
69 raw_spin_lock_init(&dist->lpi_list_lock);
70}
71
72
73
74
75
76
77
78
79
80
81
/**
 * kvm_vgic_create - create an in-kernel VGIC device for a VM
 * @kvm:  the VM for which the VGIC is created
 * @type: KVM_DEV_TYPE_ARM_VGIC_V2 or KVM_DEV_TYPE_ARM_VGIC_V3
 *
 * Returns 0 on success, or:
 *  -EEXIST if an in-kernel irqchip already exists for this VM,
 *  -ENODEV if GICv2 emulation was requested but is unavailable,
 *  -EBUSY  if the vcpus could not all be locked or one already ran,
 *  -E2BIG  if more vcpus are online than the chosen model supports.
 */
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, ret;
	struct kvm_vcpu *vcpu;

	if (irqchip_in_kernel(kvm))
		return -EEXIST;

	/*
	 * NOTE(review): per the guard below, GICv2 emulation availability is
	 * re-checked here; presumably this path can also be reached via a
	 * legacy creation ioctl that performs no such check — confirm against
	 * the callers of this function.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		!kvm_vgic_global_state.can_emulate_gicv2)
		return -ENODEV;

	/* Hold every vcpu lock so the set of vcpus cannot change under us. */
	ret = -EBUSY;
	if (!lock_all_vcpus(kvm))
		return ret;

	/* The VGIC model cannot be chosen once any vcpu has entered the guest. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	/* The model caps how many vcpus the VM may have. */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
	else
		kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;

	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) {
		ret = -E2BIG;
		goto out_unlock;
	}

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;

	/* Base addresses are set later via the device attribute API. */
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	else
		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);

out_unlock:
	unlock_all_vcpus(kvm);
	return ret;
}
134
135
136
137
138
139
140
141
/*
 * kvm_vgic_dist_init - allocate and initialize the distributor SPI array
 * @kvm:     the VM being initialized
 * @nr_spis: number of shared peripheral interrupts to allocate
 *
 * Allocates dist->spis and initializes each SPI's lock, lists, refcount
 * and per-model fields (targets/group for v2, mpidr/group for v3).
 * All SPIs initially target vcpu 0.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL if
 * the distributor model is neither v2 nor v3 (the array is freed again
 * in that case).
 */
static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
	int i;

	dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL);
	if (!dist->spis)
		return -ENOMEM;

	/*
	 * SPI INTIDs start right after the private (SGI + PPI) range,
	 * hence the VGIC_NR_PRIVATE_IRQS offset below.
	 */
	for (i = 0; i < nr_spis; i++) {
		struct vgic_irq *irq = &dist->spis[i];

		irq->intid = i + VGIC_NR_PRIVATE_IRQS;
		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu0;
		kref_init(&irq->refcount);
		switch (dist->vgic_model) {
		case KVM_DEV_TYPE_ARM_VGIC_V2:
			irq->targets = 0;
			irq->group = 0;
			break;
		case KVM_DEV_TYPE_ARM_VGIC_V3:
			irq->mpidr = 0;
			irq->group = 1;
			break;
		default:
			/* Unknown model: undo the allocation and bail out. */
			kfree(dist->spis);
			dist->spis = NULL;
			return -EINVAL;
		}
	}
	return 0;
}
186
187
188
189
190
191
192
193
194
195
/**
 * kvm_vgic_vcpu_init - initialize the per-vcpu vgic state
 * @vcpu: the vcpu being created
 *
 * Initializes the vcpu's ap_list, its private (SGI/PPI) interrupts, and,
 * for an already-created in-kernel GICv3, registers the vcpu's
 * redistributor MMIO device.
 *
 * Returns 0 on success, or the error from vgic_register_redist_iodev().
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int ret = 0;
	int i;

	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
	atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);

	/*
	 * SGIs are enabled at reset and edge-triggered; PPIs are
	 * level-triggered. All private IRQs initially target this vcpu.
	 */
	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->intid = i;
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu;
		kref_init(&irq->refcount);
		if (vgic_irq_is_sgi(i)) {
			/* SGIs: always enabled, edge-triggered */
			irq->enabled = 1;
			irq->config = VGIC_CONFIG_EDGE;
		} else {
			/* PPIs: level-triggered */
			irq->config = VGIC_CONFIG_LEVEL;
		}
	}

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	/*
	 * If the vgic is a GICv3, register this vcpu's redistributor now.
	 * kvm->lock protects the iodev registration against concurrent
	 * vgic configuration.
	 */
	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		mutex_lock(&vcpu->kvm->lock);
		ret = vgic_register_redist_iodev(vcpu);
		mutex_unlock(&vcpu->kvm->lock);
	}
	return ret;
}
246
247static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
248{
249 if (kvm_vgic_global_state.type == VGIC_V2)
250 vgic_v2_enable(vcpu);
251 else
252 vgic_v3_enable(vcpu);
253}
254
255
256
257
258
259
260
261
262
263
264
/**
 * vgic_init - finalize the VGIC after all vcpus have been created
 * @kvm: the VM being initialized
 *
 * Allocates the distributor SPIs, fixes up per-vcpu private IRQ fields
 * according to the chosen model, initializes the LPI translation cache
 * and GICv4 support where applicable, enables each vcpu interface and
 * installs default irq routing.
 *
 * Returns 0 on success (or if already initialized), -EBUSY if vcpus are
 * still being created, or a negative error from one of the init steps.
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0, i, idx;

	if (vgic_initialized(kvm))
		return 0;

	/* Freeze the number of vcpus before sizing any per-vcpu state. */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* If userspace never set nr_spis, fall back to the legacy count. */
	if (!dist->nr_spis)
		dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;

	ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
	if (ret)
		goto out;

	/*
	 * Model-dependent private IRQ fields could not be set at vcpu-init
	 * time (the model may not have been chosen yet); set them now.
	 */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
			switch (dist->vgic_model) {
			case KVM_DEV_TYPE_ARM_VGIC_V3:
				irq->group = 1;
				irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
				break;
			case KVM_DEV_TYPE_ARM_VGIC_V2:
				irq->group = 0;
				irq->targets = 1U << idx;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}
	}

	if (vgic_has_its(kvm))
		vgic_lpi_translation_cache_init(kvm);

	/*
	 * GICv4 direct MSI injection needs its own per-VM setup before any
	 * vcpu is enabled.
	 */
	if (vgic_supports_direct_msis(kvm)) {
		ret = vgic_v4_init(kvm);
		if (ret)
			goto out;
	}

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_enable(vcpu);

	ret = kvm_vgic_setup_default_irq_routing(kvm);
	if (ret)
		goto out;

	vgic_debug_init(kvm);

	/* Advertise implementation revision 2 to the guest. */
	dist->implementation_rev = 2;
	dist->initialized = true;

out:
	return ret;
}
337
/*
 * Tear down the per-VM distributor state: free the SPI array, the GICv3
 * redistributor regions, the LPI translation cache and GICv4 state.
 * Clears ready/initialized first so concurrent observers see the vgic
 * as unusable before the memory goes away.
 */
static void kvm_vgic_dist_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg, *next;

	dist->ready = false;
	dist->initialized = false;

	kfree(dist->spis);
	dist->spis = NULL;
	dist->nr_spis = 0;

	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) {
			list_del(&rdreg->list);
			kfree(rdreg);
		}
		INIT_LIST_HEAD(&dist->rd_regions);
	}

	if (vgic_has_its(kvm))
		vgic_lpi_translation_cache_destroy(kvm);

	if (vgic_supports_direct_msis(kvm))
		vgic_v4_teardown(kvm);
}
364
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	/*
	 * Retire all pending LPIs on this vcpu: it is going away, so any
	 * LPI still queued for it must be dropped (and its refcount
	 * released) before we reset the ap_list below.
	 */
	vgic_flush_pending_lpis(vcpu);

	/* Drop whatever is still linked on the ap_list. */
	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
}
377
378
/* Tear down the whole VGIC. Caller must hold kvm->lock. */
static void __kvm_vgic_destroy(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	vgic_debug_destroy(kvm);

	/* Per-vcpu state first, then the shared distributor state. */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	kvm_vgic_dist_destroy(kvm);
}
391
/* Public teardown entry point: takes kvm->lock around the real work. */
void kvm_vgic_destroy(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	__kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
}
398
399
400
401
402
403
404
/**
 * vgic_lazy_init - lazily initialize the VGIC on first use
 * @kvm: the VM whose VGIC may need initialization
 *
 * Returns 0 if the VGIC is (now) initialized, -EBUSY if lazy init is
 * not permitted for this model, or a negative error from vgic_init().
 */
int vgic_lazy_init(struct kvm *kvm)
{
	int ret = 0;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * Only the GICv2 model may be initialized lazily here; for
		 * anything else the vgic must have been initialized
		 * explicitly before use, so report it as busy.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
			return -EBUSY;

		mutex_lock(&kvm->lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->lock);
	}

	return ret;
}
426
427
428
429
430
431
432
433
434
435
436
/**
 * kvm_vgic_map_resources - map the MMIO regions of the in-kernel VGIC
 * @kvm: the VM to act on
 *
 * Dispatches to the v2 or v3 mapping routine under kvm->lock. On
 * failure the entire vgic is destroyed, so the VM cannot run with a
 * half-mapped irqchip.
 *
 * Returns 0 on success (or if no in-kernel irqchip exists), otherwise
 * the error from the model-specific mapping routine.
 */
int kvm_vgic_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (!irqchip_in_kernel(kvm))
		goto out;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
		ret = vgic_v2_map_resources(kvm);
	else
		ret = vgic_v3_map_resources(kvm);

	/* Mapping failed: tear everything down rather than limp along. */
	if (ret)
		__kvm_vgic_destroy(kvm);

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
458
459
460
/* CPU hotplug callback: enable the maintenance IRQ on the incoming CPU. */
static int vgic_init_cpu_starting(unsigned int cpu)
{
	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
	return 0;
}
466
467
/* CPU hotplug callback: disable the maintenance IRQ on the outgoing CPU. */
static int vgic_init_cpu_dying(unsigned int cpu)
{
	disable_percpu_irq(kvm_vgic_global_state.maint_irq);
	return 0;
}
473
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * Intentionally empty: the maintenance interrupt is only used to
	 * force an exit from the guest; all actual handling happens on the
	 * VM-exit path. We still must acknowledge the interrupt as handled
	 * so the core IRQ layer does not disable it.
	 */
	return IRQ_HANDLED;
}
484
485
486
487
488
489
/**
 * kvm_vgic_init_cpu_hardware - initialize the GIC virtualization
 * extensions on the current CPU
 *
 * Must be called with preemption disabled: the init targets the CPU we
 * are running on, hence the BUG_ON below.
 */
void kvm_vgic_init_cpu_hardware(void)
{
	BUG_ON(preemptible());

	/*
	 * Initialize the list registers for the detected GIC flavour; for
	 * v3 this has to be done at hyp via a hypercall.
	 */
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_init_lrs();
	else
		kvm_call_hyp(__vgic_v3_init_lrs);
}
503
504
505
506
507
508
509
/**
 * kvm_vgic_hyp_init - global VGIC initialization at KVM module load
 *
 * Probes the host GIC (v2 or v3), requests the per-cpu maintenance
 * interrupt and registers the CPU hotplug callbacks that enable/disable
 * it per CPU.
 *
 * Returns 0 on success, -ENODEV if no usable GIC was found, -ENXIO if
 * the GIC provides no maintenance IRQ, or the error from probing, IRQ
 * request or hotplug registration (with the IRQ freed again in the
 * latter case).
 */
int kvm_vgic_hyp_init(void)
{
	const struct gic_kvm_info *gic_kvm_info;
	int ret;

	gic_kvm_info = gic_get_kvm_info();
	if (!gic_kvm_info)
		return -ENODEV;

	if (!gic_kvm_info->maint_irq) {
		kvm_err("No vgic maintenance irq\n");
		return -ENXIO;
	}

	switch (gic_kvm_info->type) {
	case GIC_V2:
		ret = vgic_v2_probe(gic_kvm_info);
		break;
	case GIC_V3:
		ret = vgic_v3_probe(gic_kvm_info);
		if (!ret) {
			/* Flip the static key used on the fast path. */
			static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
			kvm_info("GIC system register CPU interface enabled\n");
		}
		break;
	default:
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
	ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
				 vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n",
			kvm_vgic_global_state.maint_irq);
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
				"kvm/arm/vgic:starting",
				vgic_init_cpu_starting, vgic_init_cpu_dying);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
	return 0;

out_free_irq:
	/* Undo the request_percpu_irq() above. */
	free_percpu_irq(kvm_vgic_global_state.maint_irq,
			kvm_get_running_vcpus());
	return ret;
}
568