/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

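/*
 * Virtual mode variants of the hcall handlers: the macros below
 * parameterize book3s_xive_template.c so that it builds the xive_vm_*
 * functions using the raw MMIO accessors.
 */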
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"

#define XIVE_Q_GAP	2

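/*
 * Trigger an interrupt by writing to its trigger page. Only valid for
 * MSI-style sources that actually have a trigger page; LSIs are rejected.
 */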
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}

static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	vcpu->arch.xive_esc_on = false;

	return IRQ_HANDLED;
}

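/*
 * Map and request the escalation interrupt for a given queue of a vcpu,
 * naming it after the LPID/server (and priority, unless single escalation
 * is in use). In single escalation mode the interrupt is switched to
 * PQ=01 and its ESB addresses are stashed for the entry/exit code, and it
 * is flagged so the generic XIVE code does not EOI it.
 */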
static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	if (xc->esc_virq[prio])
		return 0;

	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	if (xc->xive->single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;

	if (xc->xive->single_escalation) {
		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
		vcpu->arch.xive_esc_raddr = xd->eoi_page;
		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
		xd->flags |= XIVE_IRQ_NO_EOI;
	}

	return 0;
error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}

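/*
 * Allocate and configure the EQ page backing one priority queue of a
 * vcpu's virtual processor.
 */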
static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	void *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
	memset(qpage, 0, 1 << xive->q_order);

	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}

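/*
 * Called when a guest starts using a priority for the first time: make
 * sure every vcpu has a queue provisioned (and an escalation attached,
 * unless single escalation is in use) before marking the priority as
 * provisioned in the qmap. Requires kvm->lock.
 */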
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&kvm->lock);

	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !xive->single_escalation)
			xive_attach_escalation(vcpu, prio);
		if (rc)
			return rc;
	}

	mb();
	xive->qmap |= (1 << prio);
	return 0;
}

static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}

static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}

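/*
 * Pick a target vcpu for an interrupt: prefer the requested server, and
 * if its queue is full fall back to any vcpu with room, updating *server
 * accordingly.
 */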
static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel(" found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel(" no available target !\n");

	return -EBUSY;
}

static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
{
	return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
}

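/*
 * Take the source block lock and mask the interrupt at the hardware
 * level (PQ=10, or a firmware call for sources flagged MASK_VIA_FW),
 * saving the previous P/Q bits in old_p/old_q. Spinning on in_eoi avoids
 * racing with a concurrent guest EOI. The lock is left held; the previous
 * guest priority is returned.
 */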
static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	if (old_prio == MASKED)
		return old_prio;

	kvmppc_xive_select_irq(state, &hw_num, &xd);

	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive_vp(xive, state->act_server),
					  MASKED, state->number);

		state->old_p = true;
		state->old_q = false;
	} else {
		val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
		state->old_p = !!(val & 2);
		state->old_q = !!(val & 1);

		xive_native_sync_source(hw_num);
	}

	return old_prio;
}

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	if (state->guest_priority != MASKED)
		goto bail;

	kvmppc_xive_select_irq(state, &hw_num, &xd);

	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive_vp(xive, state->act_server),
					  state->act_priority, state->number);

		if (!state->old_p)
			xive_vm_source_eoi(hw_num, xd);

		if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
			xive_irq_trigger(xd);
		goto bail;
	}

	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	mb();
bail:
	state->guest_priority = prio;
}

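/*
 * Re-route an interrupt to a (server, priority) pair: pick a vcpu with
 * queue space, account for a possible stale entry left in the previous
 * queue, then reprogram the hardware source.
 */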
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	rc = xive_select_target(kvm, &server, prio);

	if (rc)
		return rc;

	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);

	state->act_priority = prio;
	state->act_server = server;

	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 xive_vp(xive, server),
					 prio, state->number);
}

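/*
 * Handle a guest "set_xive" request. Masking is done by flipping the
 * source PQ bits rather than by reprogramming the routing, so the
 * interrupt keeps its target while masked and a latched event is replayed
 * on unmask; only a real change of server or priority retargets it.
 */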
int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	if (priority != MASKED)
		rc = xive_check_provisioning(xive->kvm,
					     xive_prio_from_guest(priority));
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);

	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargeted interrupt\n");
		return -EINVAL;
	}

	if (state->saved_priority == MASKED)
		return 0;

	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}

static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return false;
	state = &sb->irq_state[idx];
	if (!state->valid)
		return false;

	xive_irq_trigger(&state->ipi_data);

	return true;
}

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	if (!xc)
		return 0;

	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}

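/*
 * Restore ICP state from a XICS migration: reinstate CPPR and MFRR,
 * trigger the vcpu IPI if MFRR is more favoured than CPPR, and replay the
 * saved XISR (or record it as delayed if it can't be retriggered yet).
 */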
int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 cppr, mfrr;
	u32 xisr;

	if (!xc || !xive)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EIO;

	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	xc->mfrr = mfrr;
	if (mfrr < cppr)
		xive_irq_trigger(&xc->vp_ipi_data);

	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel(" xisr restore delayed\n");
	}

	return 0;
}

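/*
 * Called when a guest interrupt gets bound to a host hardware source
 * (device passthrough): mask everything, park the backing IPI (PQ=01),
 * switch the state over to the real HW interrupt and program it with the
 * current target, then re-enable delivery unless the source was masked.
 */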
int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	xive_native_configure_irq(hw_irq,
				  xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	state->old_p = state->old_q = false;

	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

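/*
 * Reverse of kvmppc_xive_set_mapped(): detach the hardware source and
 * fall back to the internal IPI, restoring the IPI's P/Q state so a
 * pending interrupt isn't lost across the switch.
 */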
int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	state->pt_number = 0;
	state->pt_data = NULL;

	xive_native_configure_irq(state->ipi_number,
				  xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (!state->valid)
				continue;
			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
		}
	}
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	int i;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	xive_native_disable_vp(xc->vp_id);

	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		if (xc->esc_virq[i]) {
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}

		xive_native_disable_queue(xc->vp_id, q, i);
		if (q->qpage) {
			free_pages((unsigned long)q->qpage,
				   xive->q_page_order);
			q->qpage = NULL;
		}
	}

	if (xc->vp_ipi) {
		xive_cleanup_irq_data(&xc->vp_ipi_data);
		xive_native_free_irq(xc->vp_ipi);
	}

	kfree(xc);
}

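/*
 * Create the XIVE per-vcpu state when the guest connects a vcpu to the
 * XICS-on-XIVE device: allocate the VP and its IPI, enable the VP, then
 * provision queues for every priority already in use and attach the
 * default escalation.
 */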
int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			     struct kvm_vcpu *vcpu, u32 cpu)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc;
	int i, r = -EBUSY;

	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

	if (dev->ops != &kvm_xive_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;
	if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}
	if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}
	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc)
		return -ENOMEM;

	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = cpu;
	xc->vp_id = xive_vp(xive, cpu);
	xc->mfrr = 0xff;
	xc->valid = true;

	r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (r)
		goto bail;

	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	xc->vp_ipi = xive_native_alloc_irq();
	if (!xc->vp_ipi) {
		pr_err("Failed to allocate xive irq for VCPU IPI\n");
		r = -EIO;
		goto bail;
	}
	pr_devel(" IPI=0x%x\n", xc->vp_ipi);

	r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
	if (r)
		goto bail;

	r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
	if (r) {
		pr_err("Failed to enable VP in OPAL, err %d\n", r);
		goto bail;
	}

	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		if (i == 7 && xive->single_escalation)
			break;

		if (xive->qmap & (1 << i)) {
			r = xive_provision_queue(vcpu, i);
			if (r == 0 && !xive->single_escalation)
				xive_attach_escalation(vcpu, i);
			if (r)
				goto bail;
		} else {
			r = xive_native_configure_queue(xc->vp_id,
							q, i, NULL, 0, true);
			if (r) {
				pr_err("Failed to configure queue %d for VCPU %d\n",
				       i, cpu);
				goto bail;
			}
		}
	}

	r = xive_attach_escalation(vcpu, 0);
	if (r)
		goto bail;

	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
	if (!r)
		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
	mutex_unlock(&vcpu->kvm->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
	}

	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
	return 0;
}

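/*
 * Scanning of queues before/after migration save: mark interrupts found
 * in the EQs so the source dump can report them as pending.
 */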
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return;

	state = &sb->irq_state[idx];

	if (!state->valid) {
		pr_err("invalid irq 0x%x in cpu queue!\n", irq);
		return;
	}

	if (!state->saved_p)
		pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

	state->in_queue = true;
}

static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
				   struct kvmppc_xive_src_block *sb,
				   u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

	state->saved_p = state->old_p;
	state->saved_q = state->old_q;

	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
				     struct kvmppc_xive_src_block *sb,
				     u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	xive_lock_for_unmask(sb, state);

	if (state->saved_scan_prio != MASKED)
		xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
	u32 idx = q->idx;
	u32 toggle = q->toggle;
	u32 irq;

	do {
		irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
		if (irq > XICS_IPI)
			xive_pre_save_set_queued(xive, irq);
	} while (irq);
}

static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
	struct kvm_vcpu *vcpu = NULL;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_mask_irq(xive, sb, j);
	}

	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		if (!xc)
			continue;
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			if (xc->queues[j].qpage)
				xive_pre_save_queue(xive, &xc->queues[j]);
		}
	}

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_unmask_irq(xive, sb, j);
	}
}

static void xive_post_save_scan(struct kvmppc_xive *xive)
{
	u32 i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			sb->irq_state[j].in_queue = false;
	}

	xive->saved_src_count = 0;
}

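/*
 * Dump the state of one source for migration (KVM_DEV_XICS_GRP_SOURCES
 * read). The first read of a dump triggers a pre-save scan of all sources
 * and queues; once the last source has been read the scan state is reset.
 */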
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val, prio;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[idx];

	if (!state->valid)
		return -ENOENT;

	pr_devel("get_source(%ld)...\n", irq);

	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);
	xive->saved_src_count++;

	val = state->act_server;
	prio = state->saved_scan_prio;

	if (prio == MASKED) {
		val |= KVM_XICS_MASKED;
		prio = state->saved_priority;
	}
	val |= prio << KVM_XICS_PRIORITY_SHIFT;
	if (state->lsi) {
		val |= KVM_XICS_LEVEL_SENSITIVE;
		if (state->saved_p)
			val |= KVM_XICS_PENDING;
	} else {
		if (state->saved_p)
			val |= KVM_XICS_PRESENTED;

		if (state->saved_q)
			val |= KVM_XICS_QUEUED;

		if (state->in_queue || (prio == MASKED && state->saved_q))
			val |= KVM_XICS_PENDING;
	}

	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);

	if (put_user(val, ubufp))
		return -EFAULT;

	return 0;
}

static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
							    int irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvmppc_xive_src_block *sb;
	int i, bid;

	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	if (xive->src_blocks[bid])
		goto out;

	sb = kzalloc(sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto out;

	sb->id = bid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
		sb->irq_state[i].guest_priority = MASKED;
		sb->irq_state[i].saved_priority = MASKED;
		sb->irq_state[i].act_priority = MASKED;
	}
	smp_wmb();
	xive->src_blocks[bid] = sb;

	if (bid > xive->max_sbid)
		xive->max_sbid = bid;

out:
	mutex_unlock(&kvm->lock);
	return xive->src_blocks[bid];
}

static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		if (xc->delayed_irq == irq) {
			xc->delayed_irq = 0;
			xive->delayed_irqs--;
			return true;
		}
	}
	return false;
}

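/*
 * Restore one source from a migration stream (KVM_DEV_XICS_GRP_SOURCES
 * write): allocate its backing IPI on first use, mask it, retarget it to
 * the requested server/priority, then rebuild the P/Q bits and pending
 * state described by the flags in val before unmasking.
 */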
static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 act_prio, guest_prio;
	u32 server;
	int rc = 0;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	pr_devel("set_source(irq=0x%lx)\n", irq);

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_devel("No source, creating source block...\n");
		sb = xive_create_src_block(xive, irq);
		if (!sb) {
			pr_devel("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	if (get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);

	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	arch_spin_unlock(&sb->lock);

	if (act_prio != MASKED) {
		mutex_lock(&xive->kvm->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->kvm->lock);

		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
	}

	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel(" Found delayed ! forcing PENDING !\n");
	}

	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel(" LSI ! Asserted=%d\n", state->asserted);
	}

	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);

	if (val & KVM_XICS_MASKED) {
		pr_devel(" masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	return 0;
}

int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	xive_irq_trigger(&state->ipi_data);

	return 0;
}

static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
	xive_cleanup_irq_data(xd);
}

static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}

static void kvmppc_xive_free(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	int i;

	debugfs_remove(xive->dentry);

	if (kvm)
		kvm->arch.xive = NULL;

	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	kfree(xive);
	kfree(dev);
}

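/*
 * Create the XICS-on-XIVE KVM device: size the event queues, allocate a
 * block of virtual processors (one per possible vcpu) and record whether
 * the platform supports single escalation mode.
 */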
static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	pr_devel("Creating xive for partition\n");

	xive = kzalloc(sizeof(*xive), GFP_KERNEL);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;

	if (kvm->arch.xive)
		ret = -EEXIST;
	else
		kvm->arch.xive = xive;

	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
	pr_devel("VP_Base=%x\n", xive->vp_base);

	if (xive->vp_base == XIVE_INVALID_VP)
		ret = -ENOMEM;

	xive->single_escalation = xive_native_has_single_escalation();

	if (ret) {
		kfree(xive);
		return ret;
	}

	return 0;
}

static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_printf(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int i;

		if (!xc)
			continue;

		seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
			   " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
		for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
			struct xive_q *q = &xc->queues[i];
			u32 i0, i1, idx;

			if (!q->qpage && !xc->esc_virq[i])
				continue;

			seq_printf(m, " [q%d]: ", i);

			if (q->qpage) {
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1);
			}
			if (xc->esc_virq[i]) {
				struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
				struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
				u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
				seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
					   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
					   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
					   xc->esc_virq[i], pq, xd->eoi_page);
				seq_printf(m, "\n");
			}
		}

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_printf(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	return 0;
}

static int xive_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xive_debug_show, inode->i_private);
}

static const struct file_operations xive_debug_fops = {
	.open = xive_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.destroy = kvmppc_xive_free,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};

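/*
 * Install the virtual mode hcall handlers built from the template into
 * the __xive_vm_* hooks at module init, and clear them again on exit.
 */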
void kvmppc_xive_init_module(void)
{
	__xive_vm_h_xirr = xive_vm_h_xirr;
	__xive_vm_h_ipoll = xive_vm_h_ipoll;
	__xive_vm_h_ipi = xive_vm_h_ipi;
	__xive_vm_h_cppr = xive_vm_h_cppr;
	__xive_vm_h_eoi = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
	__xive_vm_h_xirr = NULL;
	__xive_vm_h_ipoll = NULL;
	__xive_vm_h_ipi = NULL;
	__xive_vm_h_cppr = NULL;
	__xive_vm_h_eoi = NULL;
}