1
2
3
4
5
6#include <linux/uaccess.h>
7#include <linux/interrupt.h>
8#include <linux/cpu.h>
9#include <linux/kvm_host.h>
10#include <kvm/arm_vgic.h>
11#include <asm/kvm_emulate.h>
12#include <asm/kvm_mmu.h>
13#include "vgic.h"
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52void kvm_vgic_early_init(struct kvm *kvm)
53{
54 struct vgic_dist *dist = &kvm->arch.vgic;
55
56 INIT_LIST_HEAD(&dist->lpi_list_head);
57 raw_spin_lock_init(&dist->lpi_list_lock);
58}
59
60
61
62
63
64
65
66
67
68
69
/**
 * kvm_vgic_create - create an in-kernel GIC model for a VM
 * @kvm:  the VM for which the vgic is created
 * @type: KVM_DEV_TYPE_ARM_VGIC_V2 or KVM_DEV_TYPE_ARM_VGIC_V3
 *
 * Return: 0 on success; -EEXIST if an irqchip already exists; -ENODEV if
 * GICv2 emulation was requested but is unavailable; -EBUSY if a vCPU could
 * not be locked or has already run; -E2BIG if more vCPUs are online than
 * the chosen model supports.
 */
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, vcpu_lock_idx = -1, ret;
	struct kvm_vcpu *vcpu;

	if (irqchip_in_kernel(kvm))
		return -EEXIST;

	/*
	 * GICv2 emulation may be impossible on the host (e.g. GICv3
	 * hardware without a v2 compatibility interface); fail early.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		!kvm_vgic_global_state.can_emulate_gicv2)
		return -ENODEV;

	/*
	 * Any time a vcpu is run, vcpu_load is called which takes the
	 * vcpu->mutex.  By taking every vCPU's mutex here (trylock, so we
	 * never sleep holding some of them) we guarantee no vCPU can enter
	 * the guest while the vgic is being created.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;	/* remember how far we got, for unwind */
	}

	/* A vgic cannot be wired up after a vCPU has already executed. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
	else
		kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;

	/* The new cap may be lower than the current vCPU count. */
	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) {
		ret = -E2BIG;
		goto out_unlock;
	}

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;

	/* Base addresses are set later via KVM_DEV_ARM_VGIC_GRP_ADDR. */
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	else
		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);

out_unlock:
	/* Release the vCPU mutexes in reverse acquisition order. */
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}
	return ret;
}
133
134
135
136
137
138
139
140
141static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
142{
143 struct vgic_dist *dist = &kvm->arch.vgic;
144 struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
145 int i;
146
147 dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL);
148 if (!dist->spis)
149 return -ENOMEM;
150
151
152
153
154
155
156
157
158
159 for (i = 0; i < nr_spis; i++) {
160 struct vgic_irq *irq = &dist->spis[i];
161
162 irq->intid = i + VGIC_NR_PRIVATE_IRQS;
163 INIT_LIST_HEAD(&irq->ap_list);
164 raw_spin_lock_init(&irq->irq_lock);
165 irq->vcpu = NULL;
166 irq->target_vcpu = vcpu0;
167 kref_init(&irq->refcount);
168 switch (dist->vgic_model) {
169 case KVM_DEV_TYPE_ARM_VGIC_V2:
170 irq->targets = 0;
171 irq->group = 0;
172 break;
173 case KVM_DEV_TYPE_ARM_VGIC_V3:
174 irq->mpidr = 0;
175 irq->group = 1;
176 break;
177 default:
178 kfree(dist->spis);
179 return -EINVAL;
180 }
181 }
182 return 0;
183}
184
185
186
187
188
189
190
191
192
193
/**
 * kvm_vgic_vcpu_init - initialize the per-vCPU vgic state
 * @vcpu: the vCPU being created
 *
 * Sets up the ap_list and the 32 private interrupts (SGIs and PPIs) of this
 * vCPU.  For an in-kernel GICv3, also registers the redistributor MMIO
 * device for this vCPU.
 *
 * Return: 0 on success, or the error from vgic_register_redist_iodev().
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int ret = 0;
	int i;

	/* Redistributor frames are assigned later via the device API. */
	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
	vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	raw_spin_lock_init(&vgic_cpu->ap_list_lock);

	/*
	 * Enable and configure all SGIs to be edge-triggered and
	 * configure all PPIs as level-triggered.
	 */
	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->intid = i;
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu;
		kref_init(&irq->refcount);
		if (vgic_irq_is_sgi(i)) {
			/* SGIs: always enabled, always edge-triggered */
			irq->enabled = 1;
			irq->config = VGIC_CONFIG_EDGE;
		} else {
			/* PPIs: level-triggered, disabled until the guest enables them */
			irq->config = VGIC_CONFIG_LEVEL;
		}
	}

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	/*
	 * If we are creating a VCPU with a GICv3 we must also register the
	 * KVM io device for the redistributor that belongs to this VCPU.
	 * Registration needs kvm->lock; it is safe to take here because
	 * vCPU creation does not hold it.
	 */
	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		mutex_lock(&vcpu->kvm->lock);
		ret = vgic_register_redist_iodev(vcpu);
		mutex_unlock(&vcpu->kvm->lock);
	}
	return ret;
}
244
245static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
246{
247 if (kvm_vgic_global_state.type == VGIC_V2)
248 vgic_v2_enable(vcpu);
249 else
250 vgic_v3_enable(vcpu);
251}
252
253
254
255
256
257
258
259
260
261
262
/**
 * vgic_init - finalize the vgic initialization
 * @kvm: the VM whose vgic is initialized
 *
 * Must be called with kvm->lock held.  Allocates the SPI array, fixes up
 * the per-vCPU private IRQs for the chosen model, initializes GICv4 state
 * if an ITS exists, enables each vCPU interface, and installs the default
 * IRQ routing.  Idempotent: returns 0 immediately if already initialized.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0, i, idx;

	if (vgic_initialized(kvm))
		return 0;

	/* Are we also in the middle of creating a VCPU? */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* freeze the number of spis */
	if (!dist->nr_spis)
		dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;

	ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
	if (ret)
		goto out;

	/*
	 * Initialize the model-specific fields of the private IRQs; these
	 * could not be set in kvm_vgic_vcpu_init() because the model may
	 * not have been chosen yet at vCPU creation time.
	 */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
			switch (dist->vgic_model) {
			case KVM_DEV_TYPE_ARM_VGIC_V3:
				irq->group = 1;
				irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
				break;
			case KVM_DEV_TYPE_ARM_VGIC_V2:
				irq->group = 0;
				irq->targets = 1U << idx;	/* target bitmap: self */
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}
	}

	if (vgic_has_its(kvm)) {
		ret = vgic_v4_init(kvm);
		if (ret)
			goto out;
	}

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_enable(vcpu);

	ret = kvm_vgic_setup_default_irq_routing(kvm);
	if (ret)
		goto out;

	vgic_debug_init(kvm);

	/* Advertised GIC implementation revision. */
	dist->implementation_rev = 2;
	dist->initialized = true;

out:
	return ret;
}
327
328static void kvm_vgic_dist_destroy(struct kvm *kvm)
329{
330 struct vgic_dist *dist = &kvm->arch.vgic;
331 struct vgic_redist_region *rdreg, *next;
332
333 dist->ready = false;
334 dist->initialized = false;
335
336 kfree(dist->spis);
337 dist->spis = NULL;
338 dist->nr_spis = 0;
339
340 if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
341 list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) {
342 list_del(&rdreg->list);
343 kfree(rdreg);
344 }
345 INIT_LIST_HEAD(&dist->rd_regions);
346 }
347
348 if (vgic_supports_direct_msis(kvm))
349 vgic_v4_teardown(kvm);
350}
351
352void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
353{
354 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
355
356 INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
357}
358
359
360static void __kvm_vgic_destroy(struct kvm *kvm)
361{
362 struct kvm_vcpu *vcpu;
363 int i;
364
365 vgic_debug_destroy(kvm);
366
367 kvm_vgic_dist_destroy(kvm);
368
369 kvm_for_each_vcpu(i, vcpu, kvm)
370 kvm_vgic_vcpu_destroy(vcpu);
371}
372
/* Public teardown entry point: serializes against other vgic ops via kvm->lock. */
void kvm_vgic_destroy(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	__kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
}
379
380
381
382
383
384
385
386int vgic_lazy_init(struct kvm *kvm)
387{
388 int ret = 0;
389
390 if (unlikely(!vgic_initialized(kvm))) {
391
392
393
394
395
396
397 if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
398 return -EBUSY;
399
400 mutex_lock(&kvm->lock);
401 ret = vgic_init(kvm);
402 mutex_unlock(&kvm->lock);
403 }
404
405 return ret;
406}
407
408
409
410
411
412
413
414
415
416
417
/**
 * kvm_vgic_map_resources - map the MMIO regions of the in-kernel vgic
 * @kvm: the VM
 *
 * Called on first vCPU run.  Dispatches to the v2 or v3 mapping routine;
 * on failure the whole vgic is torn down so the VM is left without a
 * half-mapped irqchip.
 *
 * Return: 0 on success (or when no in-kernel irqchip exists), a negative
 * error code otherwise.
 */
int kvm_vgic_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (!irqchip_in_kernel(kvm))
		goto out;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
		ret = vgic_v2_map_resources(kvm);
	else
		ret = vgic_v3_map_resources(kvm);

	/* On failure, destroy everything while still holding kvm->lock. */
	if (ret)
		__kvm_vgic_destroy(kvm);

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
439
440
441
/* CPU hotplug callback: enable the maintenance IRQ on the CPU coming up. */
static int vgic_init_cpu_starting(unsigned int cpu)
{
	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
	return 0;
}
447
448
/* CPU hotplug callback: disable the maintenance IRQ on the CPU going down. */
static int vgic_init_cpu_dying(unsigned int cpu)
{
	disable_percpu_irq(kvm_vgic_global_state.maint_irq);
	return 0;
}
454
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * Intentionally empty: the maintenance interrupt only serves to
	 * kick the CPU out of the guest; all real processing happens on
	 * the guest-exit path.  We just acknowledge having handled it.
	 */
	return IRQ_HANDLED;
}
465
466
467
468
469
470
471void kvm_vgic_init_cpu_hardware(void)
472{
473 BUG_ON(preemptible());
474
475
476
477
478
479 if (kvm_vgic_global_state.type == VGIC_V2)
480 vgic_v2_init_lrs();
481 else
482 kvm_call_hyp(__vgic_v3_init_lrs);
483}
484
485
486
487
488
489
490
491int kvm_vgic_hyp_init(void)
492{
493 const struct gic_kvm_info *gic_kvm_info;
494 int ret;
495
496 gic_kvm_info = gic_get_kvm_info();
497 if (!gic_kvm_info)
498 return -ENODEV;
499
500 if (!gic_kvm_info->maint_irq) {
501 kvm_err("No vgic maintenance irq\n");
502 return -ENXIO;
503 }
504
505 switch (gic_kvm_info->type) {
506 case GIC_V2:
507 ret = vgic_v2_probe(gic_kvm_info);
508 break;
509 case GIC_V3:
510 ret = vgic_v3_probe(gic_kvm_info);
511 if (!ret) {
512 static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
513 kvm_info("GIC system register CPU interface enabled\n");
514 }
515 break;
516 default:
517 ret = -ENODEV;
518 };
519
520 if (ret)
521 return ret;
522
523 kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
524 ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
525 vgic_maintenance_handler,
526 "vgic", kvm_get_running_vcpus());
527 if (ret) {
528 kvm_err("Cannot register interrupt %d\n",
529 kvm_vgic_global_state.maint_irq);
530 return ret;
531 }
532
533 ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
534 "kvm/arm/vgic:starting",
535 vgic_init_cpu_starting, vgic_init_cpu_dying);
536 if (ret) {
537 kvm_err("Cannot register vgic CPU notifier\n");
538 goto out_free_irq;
539 }
540
541 kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
542 return 0;
543
544out_free_irq:
545 free_percpu_irq(kvm_vgic_global_state.maint_irq,
546 kvm_get_running_vcpus());
547 return ret;
548}
549