#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"

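/*
 * How KVM uses GICv4: each vcpu is backed by an ITS vPE. While a vcpu
 * runs, its vPE is made resident on the current CPU (vgic_v4_load());
 * when the vcpu is scheduled out, the vPE is made non-resident again
 * (vgic_v4_put()), optionally requesting a doorbell interrupt that
 * fires if a vLPI becomes pending while the vPE is not resident.
 *
 * MSIs targeting the guest can be forwarded as vLPIs and delivered
 * directly, without a hypervisor exit (kvm_vgic_v4_set_forwarding()).
 * With GICv4.1, SGIs can be delivered the same way (vSGIs), and the
 * doorbell is managed by the HW instead of being toggled by SW.
 */
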
#define DB_IRQ_FLAGS	(IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)

static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

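	/* We got the message, no need to fire again */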
	if (!kvm_vgic_global_state.has_gicv4_1 &&
	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
		disable_irq_nosync(irq);

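	/*
	 * The v4.1 doorbell can fire concurrently with the vPE being
	 * made non-resident. Take vpe_lock so that pending_last is
	 * updated consistently with the vPE's residency state.
	 */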
	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}

static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
{
	vpe->sgi_config[irq->intid].enabled = irq->enabled;
	vpe->sgi_config[irq->intid].group = irq->group;
	vpe->sgi_config[irq->intid].priority = irq->priority;
}

static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int i;

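	/*
	 * With GICv4.1, every virtual SGI can be directly injected. So
	 * let's pretend that they are HW interrupts, tied to the vPE.
	 */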
	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			goto unlock;

		irq->hw = true;
		irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);

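		/* Transfer the full irq state to the vPE */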
		vgic_v4_sync_sgi_config(vpe, irq);
		desc = irq_to_desc(irq->host_irq);
		ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
					      false);
		if (!WARN_ON(ret)) {
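			/* Transfer pending state */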
			ret = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    irq->pending_latch);
			WARN_ON(ret);
			irq->pending_latch = false;
		}
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (!irq->hw)
			goto unlock;

		irq->hw = false;
		ret = irq_get_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    &irq->pending_latch);
		WARN_ON(ret);

		desc = irq_to_desc(irq->host_irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

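/* Must be called with the kvm lock held */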
void vgic_v4_configure_vsgis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_arm_halt_guest(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (dist->nassgireq)
			vgic_v4_enable_vsgis(vcpu);
		else
			vgic_v4_disable_vsgis(vcpu);
	}

	kvm_arm_resume_guest(kvm);
}

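/*
 * Must be called with GICv4.1 and the vPE unmapped, which
 * indicates the invalidation of any VPT caches associated
 * with the vPE, thus we can get the VLPI state by peeking
 * at the VPT.
 */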
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
{
	struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int mask = BIT(irq->intid % BITS_PER_BYTE);
	void *va;
	u8 *ptr;

	va = page_address(vpe->vpt_page);
	ptr = va + irq->intid / BITS_PER_BYTE;

	*val = !!(*ptr & mask);
}

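/**
 * vgic_v4_init - Initialize the GICv4 data structures
 * @kvm:	Pointer to the VM being initialized
 *
 * We may be called each time a vITS is created, or when the vgic is
 * initialized. In both cases, the number of vcpus should now be fixed.
 */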
int vgic_v4_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, nr_vcpus, ret;

	if (!kvm_vgic_global_state.has_gicv4)
		return 0;

	if (dist->its_vm.vpes)
		return 0;

	nr_vcpus = atomic_read(&kvm->online_vcpus);

	dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
				    GFP_KERNEL);
	if (!dist->its_vm.vpes)
		return -ENOMEM;

	dist->its_vm.nr_vpes = nr_vcpus;

	kvm_for_each_vcpu(i, vcpu, kvm)
		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	ret = its_alloc_vcpu_irqs(&dist->its_vm);
	if (ret < 0) {
		kvm_err("VPE IRQ allocation failure\n");
		kfree(dist->its_vm.vpes);
		dist->its_vm.nr_vpes = 0;
		dist->its_vm.vpes = NULL;
		return ret;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		int irq = dist->its_vm.vpes[i]->irq;
		unsigned long irq_flags = DB_IRQ_FLAGS;

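		/*
		 * Don't automatically enable the doorbell, as we're
		 * flipping it back and forth when the vcpu gets
		 * blocked. Also disable the lazy disabling, as the
		 * doorbell could kick us out of the guest too early.
		 *
		 * On GICv4.1, the doorbell is managed in HW and must
		 * be left enabled.
		 */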
		if (kvm_vgic_global_state.has_gicv4_1)
			irq_flags &= ~IRQ_NOAUTOEN;
		irq_set_status_flags(irq, irq_flags);

		ret = request_irq(irq, vgic_v4_doorbell_handler,
				  0, "vcpu", vcpu);
		if (ret) {
			kvm_err("failed to allocate vcpu IRQ%d\n", irq);
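			/*
			 * Trick: adjust the number of vpes so we know
			 * how many to nuke on teardown.
			 */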
			dist->its_vm.nr_vpes = i;
			break;
		}
	}

	if (ret)
		vgic_v4_teardown(kvm);

	return ret;
}

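/**
 * vgic_v4_teardown - Free the GICv4 data structures
 * @kvm:	Pointer to the VM being destroyed
 */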
void vgic_v4_teardown(struct kvm *kvm)
{
	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
	int i;

	if (!its_vm->vpes)
		return;

	for (i = 0; i < its_vm->nr_vpes; i++) {
		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
		int irq = its_vm->vpes[i]->irq;

		irq_clear_status_flags(irq, DB_IRQ_FLAGS);
		free_irq(irq, vcpu);
	}

	its_free_vcpu_irqs(its_vm);
	kfree(its_vm->vpes);
	its_vm->nr_vpes = 0;
	its_vm->vpes = NULL;
}

int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
		return 0;

	return its_make_vpe_non_resident(vpe, need_db);
}

int vgic_v4_load(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int err;

	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
		return 0;

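	/*
	 * Before making the VPE resident, make sure the redistributor
	 * corresponding to our current CPU expects us here. See the
	 * doc in drivers/irqchip/irq-gic-v4.c to understand how this
	 * turns into a VMOVP command at the ITS level, which will fix
	 * things for us.
	 */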
	err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
	if (err)
		return err;

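	/*
	 * Now that the vPE is resident, let's get rid of a potential
	 * doorbell interrupt that would perturb the world for no cost
	 * whatsoever.
	 */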
	if (!kvm_vgic_global_state.has_gicv4_1)
		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);

	return err;
}

void vgic_v4_commit(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

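	/*
	 * No need to wait for the vPE to be ready across a shallow guest
	 * exit, as only a vcpu_put will invalidate it.
	 */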
	if (!vpe->ready)
		its_commit_vpe(vpe);
}

static struct vgic_its *vgic_get_its(struct kvm *kvm,
				     struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct kvm_msi msi = (struct kvm_msi) {
		.address_lo	= irq_entry->msi.address_lo,
		.address_hi	= irq_entry->msi.address_hi,
		.data		= irq_entry->msi.data,
		.flags		= irq_entry->msi.flags,
		.devid		= irq_entry->msi.devid,
	};

	return vgic_msi_to_its(kvm, &msi);
}

int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
			       struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	struct its_vlpi_map map;
	unsigned long flags;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

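	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */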
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

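	/* Perform the actual DevID/EventID -> LPI translation. */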
	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

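	/*
	 * Emit the mapping request. If it fails, the ITS probably
	 * isn't v4 compatible, so let's silently bail out. Holding
	 * the ITS lock should ensure that nothing can modify the
	 * target vcpu.
	 */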
	map = (struct its_vlpi_map) {
		.vm		= &kvm->arch.vgic.its_vm,
		.vpe		= &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
		.vintid		= irq->intid,
		.properties	= ((irq->priority & 0xfc) |
				   (irq->enabled ? LPI_PROP_ENABLED : 0) |
				   LPI_PROP_GROUP1),
		.db_enabled	= true,
	};

	ret = its_map_vlpi(virq, &map);
	if (ret)
		goto out;

	irq->hw = true;
	irq->host_irq = virq;
	atomic_inc(&map.vpe->vlpi_count);

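	/* Transfer pending state */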
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->pending_latch) {
		ret = irq_set_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    irq->pending_latch);
		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
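		/*
		 * Clear pending_latch and communicate this state
		 * change via vgic_queue_irq_unlock.
		 */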
		irq->pending_latch = false;
		vgic_queue_irq_unlock(kvm, irq, flags);
	} else {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}

int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
				 struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	WARN_ON(!(irq->hw && irq->host_irq == virq));
	if (irq->hw) {
		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
		irq->hw = false;
		ret = its_unmap_vlpi(virq);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}