/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"
/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed".
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima xive_tima
#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
#define __x_writeb __raw_writeb
#define __x_readw __raw_readw
#define __x_readq __raw_readq
#define __x_writeq __raw_writeq

#include "book3s_xive_template.c"
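/*
 * Note: including book3s_xive_template.c above instantiates the
 * virtual mode hcall handlers (xive_vm_h_xirr, xive_vm_h_ipoll,
 * xive_vm_h_ipi, xive_vm_h_cppr, xive_vm_h_eoi) along with helpers
 * such as xive_vm_esb_load() and xive_vm_source_eoi() used throughout
 * this file. The handlers are hooked up to the HV entry path in
 * kvmppc_xive_init_module() at the bottom of this file.
 */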

/*
 * We leave a gap of a couple of entries in the queues to account
 * for the VCPU IPI and as an extra safety guard.
 */
#define XIVE_Q_GAP 2
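/*
 * Illustration (derived from the math in xive_try_pick_queue() below,
 * not from the original source): for a queue of 2^16 entries, q->msk
 * is 0xffff, so the number of interrupts that may be routed to that
 * queue is capped at (q->msk + 1) - XIVE_Q_GAP = 65534, leaving two
 * spare slots.
 */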

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page.
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}

static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	/* Mark an interrupt pending and kick the VCPU if it is napping */
	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	/*
	 * When the XIVE_IRQ_NO_EOI flag is set (single escalation
	 * mode), the escalation interrupt is effectively left
	 * disabled now that it has fired. Clearing xive_esc_on
	 * means the entry/exit code won't bother disabling it again
	 * on the next guest entry.
	 */
	vcpu->arch.xive_esc_on = false;

	return IRQ_HANDLED;
}

static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	if (xc->xive->single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;

	/*
	 * In single escalation mode, we grab the ESB MMIO of the
	 * interrupt and mask it. Also populate the VCPU v/raddr
	 * of the ESB page for use by asm entry/exit code. Finally
	 * set the XIVE_IRQ_NO_EOI flag which will prevent the
	 * core code from performing an EOI on the escalation
	 * interrupt, thus leaving it effectively masked after
	 * it fires once.
	 */
	if (xc->xive->single_escalation) {
		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
		vcpu->arch.xive_esc_raddr = xd->eoi_page;
		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
		xd->flags |= XIVE_IRQ_NO_EOI;
	}

	return 0;
error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}
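/*
 * Background (summary, not from the original source): an escalation
 * interrupt fires in the host when the guest leaves events unserviced
 * in the corresponding queue, typically because the VCPU is not
 * dispatched. The handler above merely records a pending interrupt
 * and kicks the VCPU. In single escalation mode all of a VP's
 * escalations are merged onto one interrupt, which is why the irq
 * name drops the priority suffix in that case.
 */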

static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	void *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve infos on current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * corresponding queue 0 entries.
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}

/* Called with kvm_lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&kvm->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations if needed */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !xive->single_escalation)
			xive_attach_escalation(vcpu, prio);
		if (rc)
			return rc;
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
	return 0;
}

static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}

static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue */
	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}

static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	/* Try pick it */
	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel(" found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel(" no available target !\n");

	/* No available target ! */
	return -EBUSY;
}

static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * If the interrupt is marked as needing masking via
	 * firmware, we do it here. Firmware masking however
	 * is "lossy", it won't return the old p and q bits
	 * and won't set the interrupt to a state where it will
	 * record queued ones. If this is an issue we should do
	 * lazy masking instead.
	 *
	 * For now, we work around this in unmask by forcing
	 * an interrupt whenever we unmask a non-LSI via FW
	 * (if ever).
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive->vp_base + state->act_server,
					  MASKED, state->number);
		/* set old_p so we can track if an H_EOI was done */
		state->old_p = true;
		state->old_q = false;
	} else {
		/* Set PQ to 10, return old P and old Q and remember them */
		val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
		state->old_p = !!(val & 2);
		state->old_q = !!(val & 1);

		/*
		 * Synchronize hardware to ensure the queues are
		 * updated when masking.
		 */
		xive_native_sync_source(hw_num);
	}

	return old_prio;
}
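/*
 * PQ bit refresher (restating the encoding used by the ESB loads
 * above, not from the original source): an XIVE_ESB_SET_PQ_xy load
 * atomically sets the 2-bit ESB state to "xy" and returns the
 * previous state, with P in bit 1 (val & 2) and Q in bit 0 (val & 1).
 * P set means an event was presented and an EOI is still owed; Q set
 * means a further event arrived and was coalesced while P was set.
 */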

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	/*
	 * Take the lock, try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto bail;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * See comment in xive_lock_and_mask() concerning masking
	 * via firmware.
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive->vp_base + state->act_server,
					  state->act_priority, state->number);
		/* If an EOI is needed, do it here */
		if (!state->old_p)
			xive_vm_source_eoi(hw_num, xd);
		/* If this is not an LSI, force a trigger */
		if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
			xive_irq_trigger(xd);
		goto bail;
	}

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI
	 * on the source. This will handle the cases where
	 * FW EOI is needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
}

/*
 * Target an interrupt to a given server/prio, this will fallback
 * to another server if necessary and perform the HW targetting
 * updates as needed.
 *
 * NOTE: Must be called with the state lock held.
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);

	/* Update state and HW */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 xive->vp_base + server,
					 prio, state->number);
}

/*
 * Targetting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 *  - Unless it was never enabled (or we run out of capacity)
 *    an interrupt is always targetted at a valid server/queue
 *    pair except when "masked" by the guest. This pair tends to
 *    be the last one used but it can be changed under some
 *    circumstances. That allows us to separate targetting
 *    issues from masking issues.
 *
 *  - When masking, we set PQ to 10 and save the previous value
 *    of P and Q.
 *
 *  - When unmasking, if saved Q was set, we set PQ to 11
 *    otherwise we leave PQ to the HW state which will be either
 *    10 if nothing happened or 11 if the interrupt fired while
 *    masked. Effectively we are OR'ing the previous Q into the
 *    HW Q.
 *
 *    Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *    which will unmask the interrupt and shoot a new one if Q was
 *    set.
 *
 *    Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *    effectively meaning an H_EOI from the guest is still expected
 *    for that interrupt).
 *
 *  - If H_EOI occurs while masked, we clear the saved P.
 *
 *  - When changing target, we account on the new target and
 *    increment a separate "pending" counter on the old one.
 *    That pending counter will be used to decrement the old
 *    target's count when its queue has been observed empty.
 */
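/*
 * Worked example of the rules above (illustrative, not from the
 * original source): an unmasked interrupt fires, so the HW sets P
 * (PQ=10) and queues an event. The guest masks it before doing H_EOI:
 * we latch old_p=1, old_q=0 and leave PQ=10. The interrupt fires
 * again while masked: the HW coalesces it by setting Q (PQ=11). On
 * unmask we see old_p set, so we leave PQ alone; when the guest
 * finally does the pending H_EOI, the set Q bit causes a replay and
 * no event is lost.
 */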
int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED)
		rc = xive_check_provisioning(xive->kvm,
					     xive_prio_from_guest(priority));
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs, we'll handle
	 * targetting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask() just takes the lock; the actual
	 * unmasking is done by xive_finish_unmask() below once any
	 * retargetting has been handled.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);

	/*
	 * Then we turn that new guest priority into a HW priority
	 * and figure out if we need to do a targetting update.
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything.
	 *
	 * The condition for re-targetting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 *       also ff, we don't do anything and leave the interrupt
	 *       untargetted. An attempt of doing an int_on on an
	 *       untargetted interrupt will fail. If that is a problem
	 *       we could initialize interrupts with valid defaults.
	 */
	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary.
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally, update saved_priority to match. It is used
	 * by int_on to restore the guest priority.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if interrupt was not targetted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargetted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	/*
	 * Lock and mask, remembering the old guest priority
	 * so that int_on can restore it.
	 */
	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}

static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return false;
	state = &sb->irq_state[idx];
	if (!state->valid)
		return false;

	/*
	 * Trigger the IPI. This assumes we never restore a pass-through
	 * interrupt which should be safe enough.
	 */
	xive_irq_trigger(&state->ipi_data);

	return true;
}

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	if (!xc)
		return 0;

	/* Return the per-cpu state for state saving/migration */
	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}
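/*
 * Note: the value built above mimics the XICS KVM_REG_PPC_ICP_STATE
 * one-reg layout (see arch/powerpc/include/uapi/asm/kvm.h for the
 * shift definitions). The pending priority field is hardwired to 0xff
 * and XISR is left as 0 because XIVE has no equivalent of the XICS
 * "one pending interrupt" latch to report.
 */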

int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 cppr, mfrr;
	u32 xisr;

	if (!xc || !xive)
		return -ENOENT;

	/* Grab individual state fields. We don't use pending_pri */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	/*
	 * We can't update the state of a "pushed" VCPU, but that
	 * shouldn't happen because the vcpu->mutex makes running a
	 * vcpu mutually exclusive with doing one_reg get/set on it.
	 */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EIO;

	/* Update VCPU HW saved state */
	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	/*
	 * Update MFRR state. If the restored MFRR is more favored
	 * than the CPPR, fire the VCPU's internal IPI. The VCPU may
	 * get a spurious interrupt out of this but that's not a big
	 * deal.
	 */
	xc->mfrr = mfrr;
	if (mfrr < cppr)
		xive_irq_trigger(&xc->vp_ipi_data);

	/*
	 * Now saved XIRR is "interesting". It means there's something in
	 * the legacy "1 element" queue... for an IPI we simply ignore it,
	 * as the MFRR restore will handle that. For anything else we need
	 * to force a resend of the source.
	 * However the source may not have been setup yet. If that's the
	 * case, we keep that info and increment a counter in the xive to
	 * tell subsequent xive_set_source() to go look.
	 */
	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel(" xisr restore delayed\n");
	}

	return 0;
}

int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",
		 guest_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mark the passed-through interrupt as going to a VCPU:
	 * this tells the host XIVE driver not to perform EOIs and
	 * similar operations on it on our behalf anymore.
	 */
	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/*
	 * Mask and read state of IPI. We need to know if its P bit
	 * is set as that means it's potentially already using a queue
	 * entry in the target.
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/* Turn the IPI hard off */
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	/* Grab info about irq */
	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	/*
	 * Configure the IRQ to match the existing configuration of
	 * the IPI if it was already targetted. Otherwise this will
	 * mask the interrupt in a lossy way (act_priority is 0xff)
	 * which is fine for a never started interrupt.
	 */
	xive_native_configure_irq(hw_irq,
				  xive->vp_base + state->act_server,
				  state->act_priority, state->number);

	/*
	 * We do an EOI to enable the interrupt (and retrigger if needed)
	 * if the guest has the interrupt unmasked and the P bit was *not*
	 * set in the IPI. If it was set, we know a slot may still be in
	 * use in the target queue, thus we have to wait for a guest
	 * originated EOI.
	 */
	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	/* Clear the saved P/Q bits, they no longer apply */
	state->old_p = state->old_q = false;

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mask and read state of IRQ. We need to know if its P bit
	 * is set as that means it's potentially already using a queue
	 * entry in the target.
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/*
	 * If P was set, force the interrupt to PQ=11 so that a
	 * resend will occur when it is released back to the host
	 * and eventually unmasked.
	 */
	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	/* Release the passed-through interrupt to the host */
	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/* Forget about the IRQ */
	state->pt_number = 0;
	state->pt_data = NULL;

	/* Reconfigure the IPI */
	xive_native_configure_irq(state->ipi_number,
				  xive->vp_base + state->act_server,
				  state->act_priority, state->number);

	/*
	 * If old_p is set (we have a queue entry potentially
	 * occupied) or the interrupt is masked, we set the IPI
	 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
	 */
	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (!state->valid)
				continue;
			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			/* Untarget and mask anything routed to this VCPU */
			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
		}
	}
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	int i;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Mask the VP IPI */
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Free the queues & associated interrupts */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Free the escalation irq */
		if (xc->esc_virq[i]) {
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}
		/* Free the queue */
		xive_native_disable_queue(xc->vp_id, q, i);
		if (q->qpage) {
			free_pages((unsigned long)q->qpage,
				   xive->q_page_order);
			q->qpage = NULL;
		}
	}

	/* Free the IPI */
	if (xc->vp_ipi) {
		xive_cleanup_irq_data(&xc->vp_ipi_data);
		xive_native_free_irq(xc->vp_ipi);
	}
	/* Free the VP */
	kfree(xc);
}

int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			     struct kvm_vcpu *vcpu, u32 cpu)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc;
	int i, r = -EBUSY;

	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

	if (dev->ops != &kvm_xive_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;
	if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}
	if (cpu >= KVM_MAX_VCPUS) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}
	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc)
		return -ENOMEM;

	/* We need to synchronize with queue provisioning */
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = cpu;
	xc->vp_id = xive->vp_base + cpu;
	xc->mfrr = 0xff;
	xc->valid = true;

	r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (r)
		goto bail;

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* Allocate IPI */
	xc->vp_ipi = xive_native_alloc_irq();
	if (!xc->vp_ipi) {
		pr_err("Failed to allocate xive irq for VCPU IPI\n");
		r = -EIO;
		goto bail;
	}
	pr_devel(" IPI=0x%x\n", xc->vp_ipi);

	r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
	if (r)
		goto bail;

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect escalation interrupt numbering.
	 */
	r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
	if (r) {
		pr_err("Failed to enable VP in OPAL, err %d\n", r);
		goto bail;
	}

	/*
	 * Initialize queues. Initially we set them all for no queueing
	 * and we enable escalation for queue 0 only, which we'll use for
	 * our mfrr change notifications. If the VCPU is hot-plugged, we
	 * do handle provisioning however, based on the existing "map"
	 * of enabled queues.
	 */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Single escalation, no queue 7 */
		if (i == 7 && xive->single_escalation)
			break;

		/* Is queue already enabled ? Provision it */
		if (xive->qmap & (1 << i)) {
			r = xive_provision_queue(vcpu, i);
			if (r == 0 && !xive->single_escalation)
				xive_attach_escalation(vcpu, i);
			if (r)
				goto bail;
		} else {
			r = xive_native_configure_queue(xc->vp_id,
							q, i, NULL, 0, true);
			if (r) {
				pr_err("Failed to configure queue %d for VCPU %d\n",
				       i, cpu);
				goto bail;
			}
		}
	}

	/* If not done above, attach priority 0 escalation */
	r = xive_attach_escalation(vcpu, 0);
	if (r)
		goto bail;

	/* Route the IPI */
	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
	if (!r)
		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
	mutex_unlock(&vcpu->kvm->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
	}

	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
	return 0;
}
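/*
 * Note (interpretation, not from the original source): in single
 * escalation mode the queue setup loop above deliberately stops
 * before queue 7, so only priorities 0-6 are usable by the guest in
 * that configuration.
 */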

/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return;

	state = &sb->irq_state[idx];

	/* Some sanity checking */
	if (!state->valid) {
		pr_err("invalid irq 0x%x in cpu queue!\n", irq);
		return;
	}

	/*
	 * If the interrupt is in a queue it should have P set.
	 * We warn so that gets reported. A backtrace isn't useful
	 * so no need to use a WARN_ON.
	 */
	if (!state->saved_p)
		pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

	/* Set flag */
	state->in_queue = true;
}

static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
				   struct kvmppc_xive_src_block *sb,
				   u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/* Mask and save state, this will also sync HW queues */
	state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

	/* Transfer P and Q */
	state->saved_p = state->old_p;
	state->saved_q = state->old_q;

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
				     struct kvmppc_xive_src_block *sb,
				     u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/*
	 * Lock / exclude EOI (not technically necessary if the
	 * guest isn't running concurrently. If this becomes a
	 * performance issue we can probably remove the lock.)
	 */
	xive_lock_for_unmask(sb, state);

	/* Restore mask/prio if it wasn't masked */
	if (state->saved_scan_prio != MASKED)
		xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
	u32 idx = q->idx;
	u32 toggle = q->toggle;
	u32 irq;

	do {
		irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
		if (irq > XICS_IPI)
			xive_pre_save_set_queued(xive, irq);
	} while (irq);
}
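/*
 * Note: __xive_read_eq() walks the event queue non-destructively here
 * since it operates on local copies of the index and toggle bit. It
 * returns each queued irq number and 0 once the queue is exhausted,
 * which terminates the loop above. IPIs (irq <= XICS_IPI) are skipped
 * because they are reconstructed from MFRR state on restore rather
 * than saved.
 */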

static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
	struct kvm_vcpu *vcpu = NULL;
	int i, j;

	/*
	 * See comment in xive_get_source() about how this
	 * works. Collect a stable state for all interrupts.
	 */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_mask_irq(xive, sb, j);
	}

	/* Then scan the queues and update the "in_queue" flag */
	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		if (!xc)
			continue;
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			if (xc->queues[j].qpage)
				xive_pre_save_queue(xive, &xc->queues[j]);
		}
	}

	/* Finally restore interrupt states */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_unmask_irq(xive, sb, j);
	}
}

static void xive_post_save_scan(struct kvmppc_xive *xive)
{
	u32 i, j;

	/* Clear all the in_queue flags */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			sb->irq_state[j].in_queue = false;
	}

	/* Next get_source() will do a new scan */
	xive->saved_src_count = 0;
}

/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val, prio;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[idx];

	if (!state->valid)
		return -ENOENT;

	pr_devel("get_source(%ld)...\n", irq);

	/*
	 * To properly save the state into something that looks like a
	 * XICS migration stream we cannot treat interrupts individually.
	 *
	 * We need, instead, to mask them all (and save their previous PQ
	 * state) to get a stable state in the HW, then sync them to ensure
	 * that any interrupt that had already fired hits its queue, and
	 * finally scan all the queues to collect which interrupts are still
	 * present in the queues, so we can set the "pending" flag on them
	 * and they can be resent on restore.
	 *
	 * So we do it all when the "first" interrupt gets saved, all the
	 * state is collected at that point, the rest of xive_get_source()
	 * will merely collect and convert that state to the expected
	 * userspace bit mask.
	 */
	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);
	xive->saved_src_count++;

	/* Convert saved state into something compatible with xics */
	val = state->act_server;
	prio = state->saved_scan_prio;

	if (prio == MASKED) {
		val |= KVM_XICS_MASKED;
		prio = state->saved_priority;
	}
	val |= prio << KVM_XICS_PRIORITY_SHIFT;
	if (state->lsi) {
		val |= KVM_XICS_LEVEL_SENSITIVE;
		if (state->saved_p)
			val |= KVM_XICS_PENDING;
	} else {
		if (state->saved_p)
			val |= KVM_XICS_PRESENTED;

		if (state->saved_q)
			val |= KVM_XICS_QUEUED;

		/*
		 * We mark it pending (which will attempt a re-delivery)
		 * if we are in a queue *or* we were masked and had
		 * Q set which is equivalent to the XICS "masked pending"
		 * state.
		 */
		if (state->in_queue || (prio == MASKED && state->saved_q))
			val |= KVM_XICS_PENDING;
	}

	/*
	 * If that was the last interrupt saved, reset the
	 * in_queue flags.
	 */
	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);

	/* Copy the result to userspace */
	if (put_user(val, ubufp))
		return -EFAULT;

	return 0;
}

static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
							   int irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvmppc_xive_src_block *sb;
	int i, bid;

	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* block already exists - somebody else got here first */
	if (xive->src_blocks[bid])
		goto out;

	/* Create the ICS */
	sb = kzalloc(sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto out;

	sb->id = bid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
		sb->irq_state[i].guest_priority = MASKED;
		sb->irq_state[i].saved_priority = MASKED;
		sb->irq_state[i].act_priority = MASKED;
	}
	smp_wmb();
	xive->src_blocks[bid] = sb;

	if (bid > xive->max_sbid)
		xive->max_sbid = bid;

out:
	mutex_unlock(&kvm->lock);
	return xive->src_blocks[bid];
}

static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		if (xc->delayed_irq == irq) {
			xc->delayed_irq = 0;
			xive->delayed_irqs--;
			return true;
		}
	}
	return false;
}

static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 act_prio, guest_prio;
	u32 server;
	int rc = 0;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	pr_devel("set_source(irq=0x%lx)\n", irq);

	/* Find the source, creating the source block if needed */
	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_devel("No source, creating source block...\n");
		sb = xive_create_src_block(xive, irq);
		if (!sb) {
			pr_devel("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	/* Read user value */
	if (get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data.
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't, we
	 * leave the interrupt untargetted. It means that an interrupt
	 * can become "untargetted" across migration if it was masked
	 * by set_xive() but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargetted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertized to a running guest yet.
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->kvm->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->kvm->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targetting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending.
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel(" Found delayed ! forcing PENDING !\n");
	}

	/* Cleanup the SW state */
	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel(" LSI ! Asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force Q and !P, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
	 */
	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);

	/*
	 * If the interrupt was masked, just save the guest priority
	 * for a later int_on; otherwise restore and unmask the HW
	 * state to the saved priority.
	 */
	if (val & KVM_XICS_MASKED) {
		pr_devel(" masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	return 0;
}
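/*
 * Usage note: xive_set_source() and xive_get_source() are reached via
 * the KVM_DEV_XICS_GRP_SOURCES device attribute group (see
 * xive_set_attr()/xive_get_attr() below), which is how userspace,
 * e.g. QEMU, saves and restores per-source state around migration.
 */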

int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	/* Perform locklessly .... (we need to do some RCUisms here...) */
	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);

	return 0;
}

static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	/* Mask the interrupt, unconfigure it and release its data */
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
	xive_cleanup_irq_data(xd);
}

static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}

static void kvmppc_xive_free(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	int i;

	debugfs_remove(xive->dentry);

	if (kvm)
		kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	kfree(xive);
	kfree(dev);
}

static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	pr_devel("Creating xive for partition\n");

	xive = kzalloc(sizeof(*xive), GFP_KERNEL);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;

	/* Already there ? */
	if (kvm->arch.xive)
		ret = -EEXIST;
	else
		kvm->arch.xive = xive;

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* Allocate a bunch of VPs */
	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
	pr_devel("VP_Base=%x\n", xive->vp_base);

	if (xive->vp_base == XIVE_INVALID_VP)
		ret = -ENOMEM;

	xive->single_escalation = xive_native_has_single_escalation();

	if (ret) {
		kfree(xive);
		return ret;
	}

	return 0;
}

/* -- Debug related stuff -- */

static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_printf(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int i;

		if (!xc)
			continue;

		seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
			   " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
		for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
			struct xive_q *q = &xc->queues[i];
			u32 i0, i1, idx;

			if (!q->qpage && !xc->esc_virq[i])
				continue;

			seq_printf(m, " [q%d]: ", i);

			if (q->qpage) {
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "T=%d %08x %08x...\n", q->toggle, i0, i1);
			}
			if (xc->esc_virq[i]) {
				struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
				struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
				u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

				seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
					   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
					   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
					   xc->esc_virq[i], pq, xd->eoi_page);
				seq_printf(m, "\n");
			}
		}

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_printf(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	return 0;
}

static int xive_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xive_debug_show, inode->i_private);
}

static const struct file_operations xive_debug_fops = {
	.open = xive_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.destroy = kvmppc_xive_free,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};
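/*
 * Note: the module init/exit hooks below publish and retract the
 * virtual mode hcall handlers generated from book3s_xive_template.c.
 * The __xive_vm_h_* pointers are consumed by the book3s HV code when
 * the real mode XICS-on-XIVE handlers need to fall back to the
 * virtual mode implementations.
 */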

void kvmppc_xive_init_module(void)
{
	__xive_vm_h_xirr = xive_vm_h_xirr;
	__xive_vm_h_ipoll = xive_vm_h_ipoll;
	__xive_vm_h_ipi = xive_vm_h_ipi;
	__xive_vm_h_cppr = xive_vm_h_cppr;
	__xive_vm_h_eoi = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
	__xive_vm_h_xirr = NULL;
	__xive_vm_h_ipoll = NULL;
	__xive_vm_h_ipi = NULL;
	__xive_vm_h_cppr = NULL;
	__xive_vm_h_eoi = NULL;
}