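/*
 * vgic-v4: GICv4 support for KVM's GICv3/ITS emulation. This provides
 * direct injection of virtual LPIs (vLPIs) into a guest, plus the
 * per-vcpu "doorbell" interrupts used to kick a vcpu when a vLPI is
 * made pending while its vPE is not resident on any physical CPU.
 */
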
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"
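
/*
 * How KVM uses GICv4, in a nutshell:
 *
 * - Each vcpu is backed by a vPE in the ITS, and each vPE gets a
 *   per-vcpu doorbell interrupt allocated for it.
 *
 * - On entry to the guest, the vPE is made resident on the current
 *   CPU's redistributor, so that vLPIs targeting this vcpu are
 *   delivered directly to the guest without a host exit.
 *
 * - On exit from the guest, the vPE is made non-resident again.
 *
 * - While the vcpu is blocked (WFI), its doorbell interrupt is
 *   enabled: a vLPI becoming pending for the non-resident vPE then
 *   fires the doorbell, and the handler kicks the vcpu back into the
 *   guest to process it.
 *
 * - MSIs routed to the guest can be forwarded by mapping the host
 *   interrupt to a vLPI (its_map_vlpi), and unmapped again when the
 *   routing is torn down.
 */
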
#define DB_IRQ_FLAGS (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)
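
/*
 * Doorbell handler: a vLPI became pending while this vcpu's vPE was
 * not resident. Record that something may be pending and kick the
 * vcpu so that it re-enters the guest and picks it up.
 */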
static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}
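
/**
 * vgic_v4_init - Initialise the GICv4 data structures
 * @kvm:	Pointer to the VM being initialised
 *
 * The number of vcpus must be fixed by the time this runs, and the
 * caller is expected to hold kvm->lock. Calling it again once the vPE
 * array exists is a no-op.
 */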
int vgic_v4_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, nr_vcpus, ret;

	if (!kvm_vgic_global_state.has_gicv4)
		return 0;

	if (dist->its_vm.vpes)
		return 0;

	nr_vcpus = atomic_read(&kvm->online_vcpus);

	dist->its_vm.vpes = kzalloc(sizeof(*dist->its_vm.vpes) * nr_vcpus,
				    GFP_KERNEL);
	if (!dist->its_vm.vpes)
		return -ENOMEM;

	dist->its_vm.nr_vpes = nr_vcpus;

	kvm_for_each_vcpu(i, vcpu, kvm)
		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	ret = its_alloc_vcpu_irqs(&dist->its_vm);
	if (ret < 0) {
		kvm_err("VPE IRQ allocation failure\n");
		kfree(dist->its_vm.vpes);
		dist->its_vm.nr_vpes = 0;
		dist->its_vm.vpes = NULL;
		return ret;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		int irq = dist->its_vm.vpes[i]->irq;
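
		/*
		 * Don't automatically enable the doorbell: it is
		 * explicitly flipped on and off as the vcpu blocks and
		 * unblocks. Lazy disabling is turned off so that a
		 * logically disabled doorbell cannot kick us out of
		 * the guest, and the irq is excluded from balancing
		 * since KVM manages its affinity directly.
		 */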
		irq_set_status_flags(irq, DB_IRQ_FLAGS);
		ret = request_irq(irq, vgic_v4_doorbell_handler,
				  0, "vcpu", vcpu);
		if (ret) {
			kvm_err("failed to allocate vcpu IRQ%d\n", irq);
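			/*
			 * Trim nr_vpes down to the number of doorbells
			 * actually requested, so that teardown only
			 * frees what was set up.
			 */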
			dist->its_vm.nr_vpes = i;
			break;
		}
	}

	if (ret)
		vgic_v4_teardown(kvm);

	return ret;
}
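
/**
 * vgic_v4_teardown - Free the GICv4 data structures
 * @kvm:	Pointer to the VM being destroyed
 *
 * The caller is expected to hold kvm->lock.
 */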
void vgic_v4_teardown(struct kvm *kvm)
{
	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
	int i;

	if (!its_vm->vpes)
		return;

	for (i = 0; i < its_vm->nr_vpes; i++) {
		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
		int irq = its_vm->vpes[i]->irq;

		irq_clear_status_flags(irq, DB_IRQ_FLAGS);
		free_irq(irq, vcpu);
	}

	its_free_vcpu_irqs(its_vm);
	kfree(its_vm->vpes);
	its_vm->nr_vpes = 0;
	its_vm->vpes = NULL;
}
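
/* Make this vcpu's vPE non-resident again (guest exit path) */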
int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
{
	if (!vgic_supports_direct_msis(vcpu->kvm))
		return 0;

	return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
}
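
/*
 * Make this vcpu's vPE resident on the current CPU (guest entry path),
 * so that vLPIs get delivered directly to the guest.
 */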
int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
{
	int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
	int err;

	if (!vgic_supports_direct_msis(vcpu->kvm))
		return 0;
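
	/*
	 * Before making the VPE resident, make sure the redistributor
	 * corresponding to the current CPU expects us here: moving the
	 * doorbell affinity ends up as a VMOVP command at the ITS
	 * level, which migrates the VPE to this redistributor.
	 */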
	err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
	if (err)
		return err;
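
	/*
	 * Now that the VPE is resident, clear any doorbell that may
	 * have fired while it wasn't: it would only perturb the guest
	 * for no good reason.
	 */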
	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);

	return err;
}
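
/* Map an MSI routing entry back to the vITS whose doorbell it targets */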
static struct vgic_its *vgic_get_its(struct kvm *kvm,
				     struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct kvm_msi msi = (struct kvm_msi) {
		.address_lo = irq_entry->msi.address_lo,
		.address_hi = irq_entry->msi.address_hi,
		.data = irq_entry->msi.data,
		.flags = irq_entry->msi.flags,
		.devid = irq_entry->msi.devid,
	};

	return vgic_msi_to_its(kvm, &msi);
}
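
/*
 * kvm_vgic_v4_set_forwarding - Map the host interrupt @virq directly
 * onto the vLPI described by the routing entry, so that it is injected
 * into the guest without software emulation.
 */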
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
			       struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	struct its_vlpi_map map;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;
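
	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */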
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;
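
	/*
	 * Emit the mapping request. If it fails, the ITS probably
	 * isn't v4 compatible, so let's silently bail out. Holding
	 * the ITS lock should ensure that nothing can modify the
	 * target vcpu.
	 */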
	map = (struct its_vlpi_map) {
		.vm = &kvm->arch.vgic.its_vm,
		.vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
		.vintid = irq->intid,
		.properties = ((irq->priority & 0xfc) |
			       (irq->enabled ? LPI_PROP_ENABLED : 0) |
			       LPI_PROP_GROUP1),
		.db_enabled = true,
	};

	ret = its_map_vlpi(virq, &map);
	if (ret)
		goto out;

	irq->hw = true;
	irq->host_irq = virq;

out:
	mutex_unlock(&its->its_lock);
	return ret;
}
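
/*
 * kvm_vgic_v4_unset_forwarding - Undo a previous set_forwarding: unmap
 * the vLPI and fall back to software injection for this interrupt.
 */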
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
				 struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;
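
	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */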
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	WARN_ON(!(irq->hw && irq->host_irq == virq));
	if (irq->hw) {
		irq->hw = false;
		ret = its_unmap_vlpi(virq);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}
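
/*
 * Enable this vcpu's doorbell (typically before it blocks on WFI), so
 * that a vLPI made pending while the vPE is not resident wakes it up.
 */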
void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
{
	if (vgic_supports_direct_msis(vcpu->kvm)) {
		int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;

		if (irq)
			enable_irq(irq);
	}
}
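
/* Disable this vcpu's doorbell again (typically once it unblocks) */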
void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
{
	if (vgic_supports_direct_msis(vcpu->kvm)) {
		int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;

		if (irq)
			disable_irq(irq);
	}
}