// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 */
#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed".
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"
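
/*
 * A quick refresher on the ESB bits used throughout this file: each
 * XIVE source has a 2-bit state machine, P and Q, manipulated through
 * MMIO loads at "magic" offsets (XIVE_ESB_SET_PQ_00/01/10/11). P means
 * the interrupt has been presented to a queue and is waiting for an
 * EOI; Q means it fired again while pending and must be re-delivered
 * on EOI. This coalescing guarantees that a source occupies at most
 * one queue slot at any time.
 */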

/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and an additional safety guard.
 */
#define XIVE_Q_GAP	2

/*
 * Push a vcpu's context to the XIVE on guest entry.
 * This assumes we are in virtual mode (MMU on).
 */
void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
{
	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
	u64 pq;

	/* Nothing to do if the platform doesn't have a XIVE */
	if (!tima)
		return;
	eieio();
	__raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
	__raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
	vcpu->arch.xive_pushed = 1;
	eieio();

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	vcpu->arch.irq_pending = 0;

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	if (vcpu->arch.xive_esc_on) {
		pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
						  XIVE_ESB_SET_PQ_01));
		mb();

		/*
		 * We have a possible subtle race here: The escalation
		 * interrupt might have fired and be on its way to the
		 * host queue while we mask it, and if we unmask it
		 * early enough (re-cede right away), there is a
		 * theoretical possibility that it fires again, thus
		 * landing in the target queue more than once which is
		 * a big no-no.
		 *
		 * Fortunately, solving this is rather easy. If the
		 * above load setting PQ to 01 returns a previous
		 * value where P is set, then we know the escalation
		 * interrupt is somewhere on its way to the host. In
		 * that case we simply don't clear the xive_esc_on
		 * flag below. It will be eventually cleared by the
		 * handler for the escalation interrupt.
		 *
		 * Then, when doing a cede, we check that flag again
		 * before re-enabling the escalation interrupt, and if
		 * set, we abort the cede.
		 */
		if (!(pq & XIVE_ESB_VAL_P))
			/* Now P is 0, we can clear the flag */
			vcpu->arch.xive_esc_on = 0;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
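
/*
 * Note: the matching context pull on guest exit is handled by the HV
 * real-mode exit path (see book3s_hv_rmhandlers.S), which saves the
 * OS CAM line state back into xive_saved_state and clears xive_pushed.
 */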

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page.
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}

static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	/*
	 * Since we have the no-EOI flag, the interrupt is effectively
	 * disabled now. Clearing xive_esc_on means we won't bother
	 * doing so on the next entry.
	 *
	 * This also allows the entry code to know that if a PQ
	 * combination of 10 is observed while xive_esc_on is true,
	 * the queue contains an unprocessed escalation interrupt. We
	 * don't make use of that knowledge today but might in the
	 * future.
	 */
	vcpu->arch.xive_esc_on = false;

	return IRQ_HANDLED;
}

int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
				  bool single_escalation)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	if (single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;

	/*
	 * In single escalation mode, we grab the ESB MMIO of the
	 * interrupt and mask it. Also populate the VCPU v/raddr
	 * of the ESB page for use by asm entry/exit code. Finally
	 * set the XIVE_IRQ_NO_EOI flag which will prevent the
	 * core code from performing an actual EOI on the escalation
	 * interrupt, thus leaving it effectively masked after
	 * it fires once.
	 */
	if (single_escalation) {
		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
		vcpu->arch.xive_esc_raddr = xd->eoi_page;
		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
		xd->flags |= XIVE_IRQ_NO_EOI;
	}

	return 0;
error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}

static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	void *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve infos on current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * corresponding queue 0 entries.
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}

/* Called with xive->lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&xive->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations if needed */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !xive->single_escalation)
			kvmppc_xive_attach_escalation(vcpu, prio,
						      xive->single_escalation);
		if (rc)
			return rc;
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
	return 0;
}

static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}

static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue. */
	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}
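
/*
 * Example of the accounting above: with 256-entry queues (q->msk ==
 * 255), xive_try_pick_queue() admits at most 256 - XIVE_Q_GAP = 254
 * routed interrupts, keeping slack for the IPI and a safety guard
 * against queue overflow.
 */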

int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	/* Try pick it */
	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel(" found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel(" no available target !\n");

	/* No available target ! */
	return -EBUSY;
}

static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * If the interrupt is marked as needing masking via
	 * firmware, we do it here. Firmware masking however
	 * is "lossy", it won't return the old P and Q bits
	 * and won't set the interrupt to a state where it will
	 * record queued ones. If this is an issue we should do
	 * lazy masking instead.
	 *
	 * For now, we work around this in unmask by forcing
	 * an interrupt whenever we unmask a non-LSI via FW
	 * (if ever).
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  kvmppc_xive_vp(xive, state->act_server),
					  MASKED, state->number);
		/* set old_p so we can track if an H_EOI was done */
		state->old_p = true;
		state->old_q = false;
	} else {
		/* Set PQ to 10, return old P and old Q and remember them */
		val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
		state->old_p = !!(val & 2);
		state->old_q = !!(val & 1);

		/*
		 * Synchronize hardware to ensure the queues are updated
		 * when masking
		 */
		xive_native_sync_source(hw_num);
	}

	return old_prio;
}
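
/*
 * Locking note: xive_lock_and_mask() and xive_lock_for_unmask() both
 * return with sb->lock held. Callers drop it themselves, usually after
 * xive_finish_unmask() or after updating the state under the lock.
 */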

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	/*
	 * Take the lock, try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto bail;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * See comment in xive_lock_and_mask() concerning masking
	 * via firmware.
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  kvmppc_xive_vp(xive, state->act_server),
					  state->act_priority, state->number);
		/* If an EOI is needed, do it here */
		if (!state->old_p)
			xive_vm_source_eoi(hw_num, xd);
		/* If this is not an LSI, force a trigger */
		if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
			xive_irq_trigger(xd);
		goto bail;
	}

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI
	 * on the source. This will handle the cases where
	 * FW EOI is needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
}

/*
 * Target an interrupt to a given server/prio, this will fallback
 * to another server if necessary and perform the HW targetting
 * updates as needed.
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = kvmppc_xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);

	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 kvmppc_xive_vp(xive, server),
					 prio, state->number);
}

/*
 * Targetting rules: in order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement some rules:
 *
 *  - Unless it was never enabled (or we run out of capacity)
 *    an interrupt is always targetted at a valid server/queue
 *    pair even when "masked" by the guest. This pair tends to
 *    be the last one used but it can be changed under some
 *    circumstances. That allows us to separate targetting
 *    from masking, we only handle accounting during (re)targetting,
 *    this also allows us to let an interrupt drain into its target
 *    queue after masking, avoiding complex schemes to remove
 *    interrupts out of remote processor queues.
 *
 *  - When masking, we set PQ to 10 and save the previous value
 *    of P and Q.
 *
 *  - When unmasking, if saved Q was set, we set PQ to 11
 *    otherwise we leave PQ to the HW state which will be either
 *    10 if nothing happened or 11 if the interrupt fired while
 *    masked. Effectively we are OR'ing the previous Q into the
 *    HW Q.
 *
 *    Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *    which will unmask the interrupt and shoot a new one if Q was
 *    set.
 *
 *    Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *    effectively meaning an H_EOI from the guest is still expected
 *    for that interrupt).
 *
 *  - If H_EOI occurs while masked, we clear the saved P.
 *
 *  - When changing target, we account on the new target and adjust
 *    the pending count on the previous one; the pending count lets
 *    the old queue's accounting get corrected later when it is
 *    observed to be empty.
 */
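
/*
 * Worked example of the rules above: masking loads XIVE_ESB_SET_PQ_10
 * and saves the old P/Q. If the interrupt fires while masked, the ESB
 * moves to PQ=11. Unmasking then observes the saved or hardware Q,
 * keeps PQ=11 and performs an effective EOI, which re-triggers the
 * interrupt so the occurrence is not lost.
 */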
int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED) {
		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm,
					     xive_prio_from_guest(priority));
		mutex_unlock(&xive->lock);
	}
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs, we'll handle
	 * targetting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask() just takes the lock; the actual
	 * unmasking is deferred to xive_finish_unmask() below, once
	 * any re-targetting has been done.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);

	/*
	 * Then we handle targetting.
	 *
	 * First calculate a new "actual priority"
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything.
	 *
	 * The condition for re-targetting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 *       also ff, we don't do anything and leave the interrupt
	 *       untargetted. An attempt of doing an int_on on an
	 *       untargetted interrupt will fail. If that is a problem
	 *       we could initialize interrupts with valid defaults.
	 */
	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally, update saved_priority to match. saved_priority is
	 * only set to a different value by int_on/int_off.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if interrupt was not targetted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargetted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	/*
	 * Lock and mask
	 */
	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}

static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return false;
	state = &sb->irq_state[idx];
	if (!state->valid)
		return false;

	/*
	 * Trigger the IPI. This assumes we never restore a pass-through
	 * interrupt which should be safe enough
	 */
	xive_irq_trigger(&state->ipi_data);

	return true;
}
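
/*
 * The ICP one_reg value manipulated below packs the XICS presentation
 * state into 64 bits: CPPR in bits 63..56, XISR in bits 55..32, MFRR
 * in bits 31..24 and the pending priority (PPRI) in bits 23..16.
 */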

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	if (!xc)
		return 0;

	/* Return the per-cpu state for state saving/migration */
	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}

int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 cppr, mfrr;
	u32 xisr;

	if (!xc || !xive)
		return -ENOENT;

	/* Grab individual state fields. We don't use pending_pri */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	/*
	 * We can't update the state of a "pushed" VCPU, but that
	 * shouldn't happen because the vcpu->mutex makes running a
	 * vcpu mutually exclusive with doing one_reg get/set on it.
	 */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EIO;

	/* Update VCPU HW saved state */
	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	/*
	 * Update MFRR state. If the new MFRR is more favored than the
	 * current CPPR, kick the VCPU's internal IPI so the change is
	 * noticed. The VCPU may get a spurious interrupt out of this,
	 * but that's not a big deal.
	 */
	xc->mfrr = mfrr;
	if (mfrr < cppr)
		xive_irq_trigger(&xc->vp_ipi_data);

	/*
	 * Now saved XIRR is "interesting". It means there's something in
	 * the legacy "1 element" queue... for an IPI we simply ignore it,
	 * as the MFRR restore will handle the re-trigger. For anything
	 * else we need to force a resend of the source. However the
	 * source may not have been setup yet. If that's the case, we
	 * keep that info and increment a counter in the xive to tell
	 * subsequent xive_set_source() to go look.
	 */
	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel(" xisr restore delayed\n");
	}

	return 0;
}

int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mark the passed-through interrupt as going to a VCPU,
	 * this will prevent further EOIs and similar operations
	 * from the XIVE code. It will also mask the interrupt
	 * to either PQ=10 or 11 state, the latter if the interrupt
	 * is pending. This will allow us to unmask or retrigger it
	 * after routing it to the guest with a simple EOI.
	 *
	 * The "state" argument is a "token", all it needs is to be
	 * non-NULL to switch to passed-through or NULL for the
	 * other way around. We may not yet have an actual VCPU
	 * target here and we don't really care.
	 */
	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/*
	 * Mask and read state of IPI. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target queue
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/* Turn the IPI hard off */
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	/*
	 * Reset ESB guest mapping. Needed when ESB pages are exposed
	 * to the guest in XIVE native mode
	 */
	if (xive->ops && xive->ops->reset_mapped)
		xive->ops->reset_mapped(kvm, guest_irq);

	/* Grab info about irq */
	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	/*
	 * Configure the IRQ to match the existing configuration of
	 * the IPI if it was already targetted. Otherwise this will
	 * mask the interrupt in a lossy way (changing the priority
	 * to 0xff) which is what we do for "untargetted" interrupts.
	 */
	xive_native_configure_irq(hw_irq,
				  kvmppc_xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * We do an EOI to enable the interrupt (and retrigger if needed)
	 * if the guest has the interrupt unmasked and the P bit was *not*
	 * set in the IPI. If it was set, we know a slot may still be in
	 * use in the target queue, thus we have to wait for a guest EOI
	 * before we can trigger a new delivery.
	 */
	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	/* Clear old_p/old_q as they are no longer relevant */
	state->old_p = state->old_q = false;

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mask and read state of IRQ. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target queue
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/*
	 * If old_p is set, the interrupt is pending, we switch it to
	 * PQ=11. This will force a resend in the host so the interrupt
	 * isn't lost to whatever host driver may pick it up
	 */
	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	/* Release the passed-through interrupt to the host */
	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/* Forget about the IRQ */
	state->pt_number = 0;
	state->pt_data = NULL;

	/*
	 * Reset ESB guest mapping. Needed when ESB pages are exposed
	 * to the guest in XIVE native mode
	 */
	if (xive->ops && xive->ops->reset_mapped) {
		xive->ops->reset_mapped(kvm, guest_irq);
	}

	/* Reconfigure the IPI */
	xive_native_configure_irq(state->ipi_number,
				  kvmppc_xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * If old_p is set (we have a queue entry potentially
	 * occupied) or the interrupt is masked, we set the IPI
	 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
	 */
	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (!state->valid)
				continue;
			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			/* Clean it up */
			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
		}
	}

	/* Disable vcpu's escalation interrupt */
	if (vcpu->arch.xive_esc_on) {
		__raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
					     XIVE_ESB_SET_PQ_01));
		vcpu->arch.xive_esc_on = false;
	}

	/*
	 * Clear pointers to escalation interrupt ESB.
	 * This is safe because the vcpu->mutex is held, preventing
	 * any other CPU from concurrently executing a KVM_RUN ioctl.
	 */
	vcpu->arch.xive_esc_vaddr = 0;
	vcpu->arch.xive_esc_raddr = 0;
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	int i;

	if (!kvmppc_xics_enabled(vcpu))
		return;

	if (!xc)
		return;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Mask the VP IPI */
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Free the queues & associated interrupts */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Free the escalation irq */
		if (xc->esc_virq[i]) {
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}
		/* Free the queue */
		xive_native_disable_queue(xc->vp_id, q, i);
		if (q->qpage) {
			free_pages((unsigned long)q->qpage,
				   xive->q_page_order);
			q->qpage = NULL;
		}
	}

	/* Free the IPI */
	if (xc->vp_ipi) {
		xive_cleanup_irq_data(&xc->vp_ipi_data);
		xive_native_free_irq(xc->vp_ipi);
	}

	/* Free the VP */
	kfree(xc);

	/* Cleanup the vcpu */
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
	vcpu->arch.xive_vcpu = NULL;
}

int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			     struct kvm_vcpu *vcpu, u32 cpu)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc;
	int i, r = -EBUSY;

	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

	if (dev->ops != &kvm_xive_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;
	if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}
	if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}
	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc)
		return -ENOMEM;

	/* We need to synchronize with queue provisioning */
	mutex_lock(&xive->lock);
	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = cpu;
	xc->vp_id = kvmppc_xive_vp(xive, cpu);
	xc->mfrr = 0xff;
	xc->valid = true;

	r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (r)
		goto bail;

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* Allocate IPI */
	xc->vp_ipi = xive_native_alloc_irq();
	if (!xc->vp_ipi) {
		pr_err("Failed to allocate xive irq for VCPU IPI\n");
		r = -EIO;
		goto bail;
	}
	pr_devel(" IPI=0x%x\n", xc->vp_ipi);

	r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
	if (r)
		goto bail;

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect escalation interrupts numbering
	 */
	r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
	if (r) {
		pr_err("Failed to enable VP in OPAL, err %d\n", r);
		goto bail;
	}

	/*
	 * Initialize queues. Initially we set them all for no queueing
	 * and we enable escalation for queue 0 only which we'll use for
	 * our mfrr change notifications. If the VCPU is hot-plugged, we
	 * do handle provisioning however based on the existing "map"
	 * of enabled queues.
	 */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Single escalation, no queue 7 */
		if (i == 7 && xive->single_escalation)
			break;

		/* Is queue already enabled ? Provision it */
		if (xive->qmap & (1 << i)) {
			r = xive_provision_queue(vcpu, i);
			if (r == 0 && !xive->single_escalation)
				kvmppc_xive_attach_escalation(
					vcpu, i, xive->single_escalation);
			if (r)
				goto bail;
		} else {
			r = xive_native_configure_queue(xc->vp_id,
							q, i, NULL, 0, true);
			if (r) {
				pr_err("Failed to configure queue %d for VCPU %d\n",
				       i, cpu);
				goto bail;
			}
		}
	}

	/* If not done above, attach priority 0 escalation */
	r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
	if (r)
		goto bail;

	/* Route the IPI */
	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
	if (!r)
		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
	mutex_unlock(&xive->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
	}

	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
	return 0;
}

/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return;

	state = &sb->irq_state[idx];

	/* Some sanity checking */
	if (!state->valid) {
		pr_err("invalid irq 0x%x in cpu queue!\n", irq);
		return;
	}

	/*
	 * If the interrupt is in a queue it should have P set.
	 * We warn so that gets reported. A backtrace isn't useful
	 * so no need to use a WARN_ON.
	 */
	if (!state->saved_p)
		pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

	/* Set flag */
	state->in_queue = true;
}

static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
				   struct kvmppc_xive_src_block *sb,
				   u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/* Mask and save state, this will also sync HW queues */
	state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

	/* Transfer P and Q */
	state->saved_p = state->old_p;
	state->saved_q = state->old_q;

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
				     struct kvmppc_xive_src_block *sb,
				     u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/*
	 * Lock / exclude EOI (not technically necessary if the
	 * guest isn't running concurrently. If this becomes a
	 * performance issue we can remove the lock.)
	 */
	xive_lock_for_unmask(sb, state);

	/* Restore mask/prio if it wasn't masked */
	if (state->saved_scan_prio != MASKED)
		xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
	u32 idx = q->idx;
	u32 toggle = q->toggle;
	u32 irq;

	do {
		irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
		if (irq > XICS_IPI)
			xive_pre_save_set_queued(xive, irq);
	} while (irq);
}

static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
	struct kvm_vcpu *vcpu = NULL;
	int i, j;

	/*
	 * See comment in xive_get_source() about why this is done
	 * only once: mask all sources first to collect a stable state.
	 */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_mask_irq(xive, sb, j);
	}

	/* Then scan the queues and update the "in_queue" flag */
	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		if (!xc)
			continue;
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			if (xc->queues[j].qpage)
				xive_pre_save_queue(xive, &xc->queues[j]);
		}
	}

	/* Finally restore interrupt states */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_unmask_irq(xive, sb, j);
	}
}

static void xive_post_save_scan(struct kvmppc_xive *xive)
{
	u32 i, j;

	/* Clear all the in_queue flags */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			sb->irq_state[j].in_queue = false;
	}

	/* Next get_source() will do a new scan */
	xive->saved_src_count = 0;
}

/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val, prio;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[idx];

	if (!state->valid)
		return -ENOENT;

	pr_devel("get_source(%ld)...\n", irq);

	/*
	 * Collecting a consistent state for all sources requires
	 * masking them all and scanning the queues, which is too
	 * expensive to redo for every interrupt. We thus do it once,
	 * on the first get_source() of a save sequence (when
	 * saved_src_count is 0), and clean up the bookkeeping once
	 * the last source has been read (see xive_post_save_scan()).
	 */
	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);
	xive->saved_src_count++;

	/* Convert saved state into something compatible with xics */
	val = state->act_server;
	prio = state->saved_scan_prio;

	if (prio == MASKED) {
		val |= KVM_XICS_MASKED;
		prio = state->saved_priority;
	}
	val |= prio << KVM_XICS_PRIORITY_SHIFT;
	if (state->lsi) {
		val |= KVM_XICS_LEVEL_SENSITIVE;
		if (state->saved_p)
			val |= KVM_XICS_PENDING;
	} else {
		if (state->saved_p)
			val |= KVM_XICS_PRESENTED;

		if (state->saved_q)
			val |= KVM_XICS_QUEUED;

		/*
		 * We mark it pending (which will attempt a re-delivery)
		 * if we are in a queue *or* we were masked and had
		 * Q set which is equivalent to the XICS "masked pending"
		 * state
		 */
		if (state->in_queue || (prio == MASKED && state->saved_q))
			val |= KVM_XICS_PENDING;
	}

	/*
	 * If that was the last interrupt saved, reset the
	 * in_queue flags
	 */
	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);

	/* Copy the state to user space */
	if (put_user(val, ubufp))
		return -EFAULT;

	return 0;
}

struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
	struct kvmppc_xive *xive, int irq)
{
	struct kvmppc_xive_src_block *sb;
	int i, bid;

	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&xive->lock);

	/* block already exists - somebody else got here first */
	if (xive->src_blocks[bid])
		goto out;

	/* Create the ICS */
	sb = kzalloc(sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto out;

	sb->id = bid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
		sb->irq_state[i].eisn = 0;
		sb->irq_state[i].guest_priority = MASKED;
		sb->irq_state[i].saved_priority = MASKED;
		sb->irq_state[i].act_priority = MASKED;
	}
	smp_wmb();
	xive->src_blocks[bid] = sb;

	if (bid > xive->max_sbid)
		xive->max_sbid = bid;

out:
	mutex_unlock(&xive->lock);
	return xive->src_blocks[bid];
}
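
/*
 * Guest IRQ numbers map to source blocks by their top bits: bid is
 * irq >> KVMPPC_XICS_ICS_SHIFT and each block holds
 * KVMPPC_XICS_IRQ_PER_ICS sources, mirroring the XICS ICS layout.
 */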

static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		if (xc->delayed_irq == irq) {
			xc->delayed_irq = 0;
			xive->delayed_irqs--;
			return true;
		}
	}
	return false;
}

static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 act_prio, guest_prio;
	u32 server;
	int rc = 0;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	pr_devel("set_source(irq=0x%lx)\n", irq);

	/* Find the source */
	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_devel("No source, creating source block...\n");
		sb = kvmppc_xive_create_src_block(xive, irq);
		if (!sb) {
			pr_devel("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	/* Read user passed data */
	if (get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't we
	 * leave the interrupt untargetted. It means that an interrupt
	 * can become "untargetted" across migration if it was masked
	 * by set_xive() but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargetted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertized to a running guest yet
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targetting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel(" Found delayed ! forcing PENDING !\n");
	}

	/* Cleanup the SW state */
	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel(" LSI ! Asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force Q and !P, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
	 */
	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);

	/*
	 * If the interrupt was unmasked, restore guest_priority and
	 * let xive_finish_unmask() commit the P/Q state. Otherwise
	 * just stash the priority for a later int_on.
	 */
	if (val & KVM_XICS_MASKED) {
		pr_devel(" masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	return 0;
}

int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	/* Perform locklessly .... (we need to do some RCUisms here...) */
	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);

	return 0;
}

static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
}

void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_cleanup_irq_data(&state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}

/*
 * Called when device fd is closed. kvm->lock is held.
 */
static void kvmppc_xive_release(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	int i;

	pr_devel("Releasing xive device\n");

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd referring to the
	 * device. Therefore there can not be any of the device
	 * attribute set/get functions being executed concurrently,
	 * and similarly, the connect_vcpu and set/clr_mapped
	 * functions also cannot be being executed.
	 */

	debugfs_remove(xive->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
		 * Holding the vcpu->mutex also means that the vcpu cannot
		 * be executing the KVM_RUN ioctl, and therefore it cannot
		 * be executing the XIVE push or pull code or accessing
		 * the XIVE MMIO regions.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xive_cleanup_vcpu(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	/*
	 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
	 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
	 * against xive code getting called during vcpu execution or
	 * set/get one_reg operations.
	 */
	kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	/*
	 * A reference of the kvmppc_xive pointer is now kept under
	 * the xive_devices struct of the machine for reuse. It is
	 * freed when the VM is destroyed for now until we fix all the
	 * execution paths.
	 */

	kfree(dev);
}

/*
 * When the guest chooses the interrupt mode (XICS legacy or XIVE
 * native), the VM switches to the corresponding KVM device. The
 * previous device is released before the new one is created.
 *
 * Until we are sure all execution paths are well protected, provide a
 * fail-safe (transitional) method for device destruction, in which
 * the XIVE device pointer is recycled and not directly freed.
 */
struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
{
	struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
		&kvm->arch.xive_devices.native :
		&kvm->arch.xive_devices.xics_on_xive;
	struct kvmppc_xive *xive = *kvm_xive_device;

	if (!xive) {
		xive = kzalloc(sizeof(*xive), GFP_KERNEL);
		*kvm_xive_device = xive;
	} else {
		memset(xive, 0, sizeof(*xive));
	}

	return xive;
}

/*
 * Create a XICS device with XIVE backend. kvm->lock is held.
 */
static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	pr_devel("Creating xive for partition\n");

	xive = kvmppc_xive_get_device(kvm, type);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;
	mutex_init(&xive->lock);

	/* Already there ? */
	if (kvm->arch.xive)
		ret = -EEXIST;
	else
		kvm->arch.xive = xive;

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* Allocate a bunch of VPs */
	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
	pr_devel("VP_Base=%x\n", xive->vp_base);

	if (xive->vp_base == XIVE_INVALID_VP)
		ret = -ENOMEM;

	xive->single_escalation = xive_native_has_single_escalation();

	if (ret)
		return ret;

	return 0;
}
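
/*
 * Sizing example for the queue setup above: with 64kB host pages
 * (PAGE_SHIFT == 16) and a default EQ shift of 16, each queue is one
 * 64kB page (q_page_order == 0) holding 16k 4-byte event entries.
 */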

int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	unsigned int i;

	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];
		u32 i0, i1, idx;

		if (!q->qpage && !xc->esc_virq[i])
			continue;

		seq_printf(m, " [q%d]: ", i);

		if (q->qpage) {
			idx = q->idx;
			i0 = be32_to_cpup(q->qpage + idx);
			idx = (idx + 1) & q->msk;
			i1 = be32_to_cpup(q->qpage + idx);
			seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
				   i0, i1);
		}
		if (xc->esc_virq[i]) {
			struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
			struct xive_irq_data *xd =
				irq_data_get_irq_handler_data(d);
			u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

			seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
				   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
				   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
				   xc->esc_virq[i], pq, xd->eoi_page);
			seq_puts(m, "\n");
		}
	}
	return 0;
}

static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_printf(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
			   " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);

		kvmppc_xive_debug_show_queues(m, vcpu);

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_printf(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(xive_debug);

static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.release = kvmppc_xive_release,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};

void kvmppc_xive_init_module(void)
{
	__xive_vm_h_xirr = xive_vm_h_xirr;
	__xive_vm_h_ipoll = xive_vm_h_ipoll;
	__xive_vm_h_ipi = xive_vm_h_ipi;
	__xive_vm_h_cppr = xive_vm_h_cppr;
	__xive_vm_h_eoi = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
	__xive_vm_h_xirr = NULL;
	__xive_vm_h_ipoll = NULL;
	__xive_vm_h_ipi = NULL;
	__xive_vm_h_cppr = NULL;
	__xive_vm_h_eoi = NULL;
}