#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"
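
/*
 * Support for GICv4 direct interrupt injection.
 *
 * With GICv4, the ITS can deliver virtual LPIs (and, on GICv4.1,
 * virtual SGIs) directly to a vPE that is resident on a physical CPU,
 * without the hypervisor emulating the delivery. When the vPE is not
 * resident, a per-vCPU "doorbell" interrupt fires instead; its handler
 * below marks the vPE as having pending work and kicks the vCPU so it
 * picks the interrupt up on its next entry.
 */
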
#define DB_IRQ_FLAGS (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)

static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
        struct kvm_vcpu *vcpu = info;
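
        /*
         * The doorbell has delivered its message: on GICv4.0, mask it
         * so it does not keep firing.
         */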
        if (!kvm_vgic_global_state.has_gicv4_1 &&
            !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
                disable_irq_nosync(irq);
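
        /*
         * Record that this vPE has pending work. Take vpe_lock so the
         * update of pending_last is serialized against other updates
         * to the vPE state.
         */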
        raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
        vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
        raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);

        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);

        return IRQ_HANDLED;
}

static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
{
        vpe->sgi_config[irq->intid].enabled = irq->enabled;
        vpe->sgi_config[irq->intid].group = irq->group;
        vpe->sgi_config[irq->intid].priority = irq->priority;
}

static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
{
        struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
        int i;
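
        /*
         * On GICv4.1, virtual SGIs can be delivered directly in HW.
         * Treat every SGI as a HW interrupt backed by the vPE's SGI
         * irqdomain, and push the guest's current configuration and
         * pending state into it.
         */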
        for (i = 0; i < VGIC_NR_SGIS; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
                struct irq_desc *desc;
                unsigned long flags;
                int ret;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (irq->hw)
                        goto unlock;

                irq->hw = true;
                irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);
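
                /* Push the guest-visible SGI configuration into the vPE */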
                vgic_v4_sync_sgi_config(vpe, irq);
                desc = irq_to_desc(irq->host_irq);
                ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
                                              false);
                if (!WARN_ON(ret)) {
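                        /* Transfer the software pending state to the HW */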
                        ret = irq_set_irqchip_state(irq->host_irq,
                                                    IRQCHIP_STATE_PENDING,
                                                    irq->pending_latch);
                        WARN_ON(ret);
                        irq->pending_latch = false;
                }
        unlock:
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
{
        int i;

        for (i = 0; i < VGIC_NR_SGIS; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
                struct irq_desc *desc;
                unsigned long flags;
                int ret;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (!irq->hw)
                        goto unlock;

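                /* Hand the SGI back to SW, preserving its HW pending state */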
                irq->hw = false;
                ret = irq_get_irqchip_state(irq->host_irq,
                                            IRQCHIP_STATE_PENDING,
                                            &irq->pending_latch);
                WARN_ON(ret);

                desc = irq_to_desc(irq->host_irq);
                irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
        unlock:
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}
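
/*
 * Switch every vCPU of the VM between directly-injected vSGIs and
 * purely emulated SGIs, depending on the guest's nASSGIreq setting.
 * The guest is stopped while the switch takes place.
 */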
void vgic_v4_configure_vsgis(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        unsigned long i;

        kvm_arm_halt_guest(kvm);

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (dist->nassgireq)
                        vgic_v4_enable_vsgis(vcpu);
                else
                        vgic_v4_disable_vsgis(vcpu);
        }

        kvm_arm_resume_guest(kvm);
}
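
/*
 * Read the pending state of a directly-injected vLPI by peeking at the
 * vPE's virtual pending table (VPT). This is only reliable when the
 * VPT contents in memory are current.
 */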
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
{
        struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
        int mask = BIT(irq->intid % BITS_PER_BYTE);
        void *va;
        u8 *ptr;

        va = page_address(vpe->vpt_page);
        ptr = va + irq->intid / BITS_PER_BYTE;

        *val = !!(*ptr & mask);
}
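
/**
 * vgic_v4_init - Allocate and initialise the GICv4 data structures
 * @kvm: Pointer to the VM being initialised
 *
 * Sets up the its_vm covering all vCPUs and requests one doorbell
 * interrupt per vCPU. Calling this again once the vPE array exists is
 * a no-op.
 */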
int vgic_v4_init(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int nr_vcpus, ret;
        unsigned long i;

        if (!kvm_vgic_global_state.has_gicv4)
                return 0;

        if (dist->its_vm.vpes)
                return 0;

        nr_vcpus = atomic_read(&kvm->online_vcpus);

        dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
                                    GFP_KERNEL_ACCOUNT);
        if (!dist->its_vm.vpes)
                return -ENOMEM;

        dist->its_vm.nr_vpes = nr_vcpus;

        kvm_for_each_vcpu(i, vcpu, kvm)
                dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

        ret = its_alloc_vcpu_irqs(&dist->its_vm);
        if (ret < 0) {
                kvm_err("VPE IRQ allocation failure\n");
                kfree(dist->its_vm.vpes);
                dist->its_vm.nr_vpes = 0;
                dist->its_vm.vpes = NULL;
                return ret;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                int irq = dist->its_vm.vpes[i]->irq;
                unsigned long irq_flags = DB_IRQ_FLAGS;
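
                /*
                 * The doorbell is requested masked (IRQ_NOAUTOEN) and
                 * pinned (IRQ_NO_BALANCING): on GICv4.0 the handler
                 * masks it again as soon as it fires. GICv4.1
                 * doorbells can be left enabled, so drop IRQ_NOAUTOEN
                 * in that case.
                 */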
                if (kvm_vgic_global_state.has_gicv4_1)
                        irq_flags &= ~IRQ_NOAUTOEN;
                irq_set_status_flags(irq, irq_flags);

                ret = request_irq(irq, vgic_v4_doorbell_handler,
                                  0, "vcpu", vcpu);
                if (ret) {
                        kvm_err("failed to allocate vcpu IRQ%d\n", irq);
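                        /*
                         * Record how many doorbells were actually
                         * requested, so that teardown only frees
                         * those.
                         */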
                        dist->its_vm.nr_vpes = i;
                        break;
                }
        }

        if (ret)
                vgic_v4_teardown(kvm);

        return ret;
}
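
/**
 * vgic_v4_teardown - Free the GICv4 data structures
 * @kvm: Pointer to the VM being destroyed
 */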
void vgic_v4_teardown(struct kvm *kvm)
{
        struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
        int i;

        if (!its_vm->vpes)
                return;

        for (i = 0; i < its_vm->nr_vpes; i++) {
                struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
                int irq = its_vm->vpes[i]->irq;

                irq_clear_status_flags(irq, DB_IRQ_FLAGS);
                free_irq(irq, vcpu);
        }

        its_free_vcpu_irqs(its_vm);
        kfree(its_vm->vpes);
        its_vm->nr_vpes = 0;
        its_vm->vpes = NULL;
}

int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
        struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

        if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
                return 0;

        return its_make_vpe_non_resident(vpe, need_db);
}

int vgic_v4_load(struct kvm_vcpu *vcpu)
{
        struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
        int err;

        if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
                return 0;
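
        /*
         * Point the doorbell affinity at the CPU we are about to run
         * on before making the vPE resident there.
         */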
        err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
        if (err)
                return err;

        err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
        if (err)
                return err;
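
        /*
         * The vPE is now resident: on GICv4.0, clear any doorbell that
         * may still be pending, as it has served its purpose.
         */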
        if (!kvm_vgic_global_state.has_gicv4_1)
                err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);

        return err;
}

void vgic_v4_commit(struct kvm_vcpu *vcpu)
{
        struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
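
        /*
         * its_commit_vpe() only needs to run until the vPE has been
         * marked ready; skip it afterwards.
         */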
        if (!vpe->ready)
                its_commit_vpe(vpe);
}

static struct vgic_its *vgic_get_its(struct kvm *kvm,
                                     struct kvm_kernel_irq_routing_entry *irq_entry)
{
        struct kvm_msi msi = (struct kvm_msi) {
                .address_lo = irq_entry->msi.address_lo,
                .address_hi = irq_entry->msi.address_hi,
                .data = irq_entry->msi.data,
                .flags = irq_entry->msi.flags,
                .devid = irq_entry->msi.devid,
        };

        return vgic_msi_to_its(kvm, &msi);
}

int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
                               struct kvm_kernel_irq_routing_entry *irq_entry)
{
        struct vgic_its *its;
        struct vgic_irq *irq;
        struct its_vlpi_map map;
        unsigned long flags;
        int ret;

        if (!vgic_supports_direct_msis(kvm))
                return 0;
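
        /*
         * Find the ITS targeted by this MSI. If there isn't one, the
         * interrupt cannot be forwarded, so bail out quietly.
         */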
        its = vgic_get_its(kvm, irq_entry);
        if (IS_ERR(its))
                return 0;

        mutex_lock(&its->its_lock);
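
        /* Perform the actual DevID/EventID -> LPI translation */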
        ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
                                   irq_entry->msi.data, &irq);
        if (ret)
                goto out;
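
        /*
         * Describe the vLPI mapping from the guest's view of the
         * interrupt: target vPE, virtual INTID, priority, group and
         * enable state, with the doorbell enabled.
         */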
        map = (struct its_vlpi_map) {
                .vm = &kvm->arch.vgic.its_vm,
                .vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
                .vintid = irq->intid,
                .properties = ((irq->priority & 0xfc) |
                               (irq->enabled ? LPI_PROP_ENABLED : 0) |
                               LPI_PROP_GROUP1),
                .db_enabled = true,
        };

        ret = its_map_vlpi(virq, &map);
        if (ret)
                goto out;

        irq->hw = true;
        irq->host_irq = virq;
        atomic_inc(&map.vpe->vlpi_count);
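
        /* Transfer any software pending state over to the HW side */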
        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->pending_latch) {
                ret = irq_set_irqchip_state(irq->host_irq,
                                            IRQCHIP_STATE_PENDING,
                                            irq->pending_latch);
                WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
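
                /*
                 * Clear pending_latch and communicate this state
                 * change via vgic_queue_irq_unlock.
                 */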
                irq->pending_latch = false;
                vgic_queue_irq_unlock(kvm, irq, flags);
        } else {
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        }

out:
        mutex_unlock(&its->its_lock);
        return ret;
}

int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
                                 struct kvm_kernel_irq_routing_entry *irq_entry)
{
        struct vgic_its *its;
        struct vgic_irq *irq;
        int ret;

        if (!vgic_supports_direct_msis(kvm))
                return 0;
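
        /*
         * Find the ITS targeted by this MSI; if there isn't one, there
         * is nothing to unmap, so exit early.
         */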
        its = vgic_get_its(kvm, irq_entry);
        if (IS_ERR(its))
                return 0;

        mutex_lock(&its->its_lock);

        ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
                                   irq_entry->msi.data, &irq);
        if (ret)
                goto out;

        WARN_ON(!(irq->hw && irq->host_irq == virq));
        if (irq->hw) {
                atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
                irq->hw = false;
                ret = its_unmap_vlpi(virq);
        }

out:
        mutex_unlock(&its->its_lock);
        return ret;
}