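/*
 * VGIC/KVM device interface: the KVM device attribute handlers that
 * userspace uses to configure the in-kernel virtual GIC (GICv2 and
 * GICv3), covering base address setup, register access and device
 * registration.
 */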
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
#include "vgic.h"
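
/* Helpers shared by the GICv2 and GICv3 device code. */
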
int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
		      phys_addr_t addr, phys_addr_t alignment)
{
	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (!IS_ALIGNED(addr, alignment))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;

	return 0;
}

static int vgic_check_type(struct kvm *kvm, int type_needed)
{
	if (kvm->arch.vgic.vgic_model != type_needed)
		return -ENODEV;
	else
		return 0;
}
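
/**
 * kvm_vgic_addr - set or get the VGIC base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to the address value; read when setting, updated
 *         when querying
 * @write: true to set the address, false to read it back
 *
 * Serializes against concurrent callers with kvm->lock.  Returns
 * -ENODEV if the type does not match the configured VGIC model,
 * -E2BIG if the address exceeds the guest physical address space,
 * -EINVAL if it is not properly aligned, and -EEXIST if the address
 * has already been set.
 */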
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t *addr_ptr, alignment;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;
		if (write) {
			r = vgic_v3_set_redist_base(kvm, *addr);
			goto out;
		}
		addr_ptr = &vgic->vgic_redist_base;
		break;
	default:
		r = -ENODEV;
	}

	if (r)
		goto out;

	if (write) {
		r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
		if (!r)
			*addr_ptr = *addr;
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;
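
		/*
		 * The requested number of IRQs must be a multiple of 32,
		 * cover the private IRQs plus at least one block of SPIs,
		 * and must not exceed VGIC_MAX_RESERVED.
		 */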
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}

static int vgic_get_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_spis +
			     VGIC_NR_PRIVATE_IRQS, uaddr);
		break;
	}
	}

	return r;
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
	int ret = -ENODEV;

	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V2);
		break;
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V3);

		if (ret)
			break;
		ret = kvm_vgic_register_its_device();
		break;
	}

	return ret;
}

int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr)
{
	int cpuid;

	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
		return -EINVAL;

	reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	return 0;
}

static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
	struct kvm_vcpu *tmp_vcpu;

	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&tmp_vcpu->mutex);
	}
}

void unlock_all_vcpus(struct kvm *kvm)
{
	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

bool lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *tmp_vcpu;
	int c;
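
	/*
	 * Try to grab every vcpu->mutex.  If any of them is already
	 * held, a vcpu may currently be running, so release whatever
	 * we have taken and let the caller report -EBUSY.
	 */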
	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
			unlock_vcpus(kvm, c - 1);
			return false;
		}
	}

	return true;
}
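
/**
 * vgic_v2_attr_regs_access - read or write a GICv2 register
 * @dev:      the KVM device
 * @attr:     the attribute, encoding the target vcpu and register offset
 * @reg:      pointer to the register value (source for a write,
 *            destination for a read)
 * @is_write: true for a write access, false for a read
 *
 * Makes sure the VGIC is initialized, then performs the access with
 * kvm->lock and every vcpu mutex held so that no vcpu can run
 * concurrently.
 */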
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    u32 *reg, bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_v2_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	}

	return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return vgic_v2_has_attr_regs(dev, attr);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};

int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr)
{
	unsigned long vgic_mpidr, mpidr_reg;
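
	/*
	 * Distributor accesses are not per-vcpu, so any vcpu will do;
	 * for everything else the attr encodes the target vcpu's MPIDR.
	 */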
	if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
		vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
			     KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;

		mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
		reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
	} else {
		reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
	}

	if (!reg_attr->vcpu)
		return -EINVAL;

	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	return 0;
}
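
/**
 * vgic_v3_attr_regs_access - read or write a GICv3 register
 * @dev:      the KVM device
 * @attr:     the attribute, encoding the target vcpu and register
 * @reg:      pointer to the 64-bit register value; distributor and
 *            redistributor accesses only use the lower 32 bits
 * @is_write: true for a write access, false for a read
 *
 * Requires an already initialized VGIC (fails with -EBUSY otherwise)
 * and performs the access with kvm->lock and every vcpu mutex held.
 */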
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    u64 *reg, bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;
	u32 tmp32;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	mutex_lock(&dev->kvm->lock);

	if (unlikely(!vgic_initialized(dev->kvm))) {
		ret = -EBUSY;
		goto out;
	}

	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		if (is_write)
			tmp32 = *reg;

		ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
		if (!is_write)
			*reg = tmp32;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
		if (is_write)
			tmp32 = *reg;

		ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
		if (!is_write)
			*reg = tmp32;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 regid;

		regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
		ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
						  regid, reg);
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		unsigned int info, intid;

		info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
			KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
		if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
			intid = attr->attr &
				KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
			ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
							      intid, reg);
		} else {
			ret = -EINVAL;
		}
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

static int vgic_v3_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 tmp32;
		u64 reg;

		if (get_user(tmp32, uaddr))
			return -EFAULT;

		reg = tmp32;
		return vgic_v3_attr_regs_access(dev, attr, &reg, true);
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_v3_attr_regs_access(dev, attr, &reg, true);
	}
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u64 reg;
		u32 tmp32;

		if (get_user(tmp32, uaddr))
			return -EFAULT;

		reg = tmp32;
		return vgic_v3_attr_regs_access(dev, attr, &reg, true);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		int ret;

		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
			mutex_lock(&dev->kvm->lock);

			if (!lock_all_vcpus(dev->kvm)) {
				mutex_unlock(&dev->kvm->lock);
				return -EBUSY;
			}
			ret = vgic_v3_save_pending_tables(dev->kvm);
			unlock_all_vcpus(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return ret;
		}
		break;
	}
	}
	return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u64 reg;
		u32 tmp32;

		ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		tmp32 = reg;
		return put_user(tmp32, uaddr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u64 reg;
		u32 tmp32;

		ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		tmp32 = reg;
		return put_user(tmp32, uaddr);
	}
	}
	return -ENXIO;
}

static int vgic_v3_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V3_ADDR_TYPE_DIST:
		case KVM_VGIC_V3_ADDR_TYPE_REDIST:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		return vgic_v3_has_attr_regs(dev, attr);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
		      KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
		      VGIC_LEVEL_INFO_LINE_LEVEL)
			return 0;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};