#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

/*
 * Virtual mode variants of the XICS hypercalls and ESB accessors are
 * instantiated here from the common template; the resulting xive_vm_*
 * helpers are also used directly by the rest of this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"

/*
 * We leave a gap of a couple of entries when counting the interrupts
 * targeted at a queue, to account for the VCPU IPI and to provide a
 * safety margin against queue overflow.
 */
#define XIVE_Q_GAP	2

/*
 * Push a vcpu's interrupt context into the XIVE thread management
 * area (TIMA) on guest entry. This assumes we are in virtual mode
 * (MMU on).
 */
void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
{
	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
	u64 pq;

	/*
	 * Nothing to do if the platform doesn't have a XIVE
	 * or this vCPU doesn't have its own XIVE context
	 * (e.g. because it's not using an in-kernel interrupt controller).
	 */
	if (!tima || !vcpu->arch.xive_cam_word)
		return;

	eieio();
	__raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
	__raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
	vcpu->arch.xive_pushed = 1;
	eieio();

	/*
	 * Clear the irq_pending flag. There is a small chance of a race
	 * vs. the escalation interrupt setting it again on another CPU,
	 * but the only consequence is a spurious wakeup on the next
	 * H_CEDE, which is harmless.
	 */
	vcpu->arch.irq_pending = 0;

	/*
	 * If the escalation interrupt was left enabled by a previous
	 * cede, mask it now (PQ = 01) since the vcpu is about to run.
	 */
	if (vcpu->arch.xive_esc_on) {
		pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
						  XIVE_ESB_SET_PQ_01));
		mb();

		/*
		 * If the load returned a value with P already set, the
		 * escalation has fired and is on its way to a host queue:
		 * leave xive_esc_on set so that the cede path knows not
		 * to re-enable it yet; the escalation handler will clear
		 * the flag instead.
		 */
		if (!(pq & XIVE_ESB_VAL_P))
			/* P was clear, the escalation is masked for good */
			vcpu->arch.xive_esc_on = 0;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);

/*
 * Trigger a generic XIVE source by writing to its trigger page.
 * Only valid for MSI-type sources that actually have one.
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those should never be triggered */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}

static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	/*
	 * Clearing xive_esc_on means the guest entry path won't bother
	 * masking the escalation again on the next entry. With the
	 * NO_EOI flag set (single escalation mode), the interrupt stays
	 * effectively disabled from here on.
	 */
	vcpu->arch.xive_esc_on = false;

	/* This orders xive_esc_on = false vs. subsequent stale_p = true */
	smp_wmb();	/* goes with smp_mb() in cleanup_single_escalation */

	return IRQ_HANDLED;
}
174
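/*
 * Set up the escalation interrupt for one of a vcpu's queues: map the
 * queue's escalation IRQ into the host, request it with xive_esc_irq()
 * as handler and, in single escalation mode, mask it (PQ = 01) and mark
 * it NO_EOI so that the guest entry/exit path can drive it directly
 * through its ESB page.
 */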
175int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
176 bool single_escalation)
177{
178 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
179 struct xive_q *q = &xc->queues[prio];
180 char *name = NULL;
181 int rc;
182
183
184 if (xc->esc_virq[prio])
185 return 0;
186
187
188 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
189 if (!xc->esc_virq[prio]) {
190 pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
191 prio, xc->server_num);
192 return -EIO;
193 }
194
195 if (single_escalation)
196 name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
197 vcpu->kvm->arch.lpid, xc->server_num);
198 else
199 name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
200 vcpu->kvm->arch.lpid, xc->server_num, prio);
201 if (!name) {
202 pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
203 prio, xc->server_num);
204 rc = -ENOMEM;
205 goto error;
206 }
207
208 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
209
210 rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
211 IRQF_NO_THREAD, name, vcpu);
212 if (rc) {
213 pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
214 prio, xc->server_num);
215 goto error;
216 }
217 xc->esc_virq_names[prio] = name;
218
219
220
221
222
223
224
225
226
227 if (single_escalation) {
228 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
229 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
230
231 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
232 vcpu->arch.xive_esc_raddr = xd->eoi_page;
233 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
234 xd->flags |= XIVE_IRQ_NO_EOI;
235 }
236
237 return 0;
238error:
239 irq_dispose_mapping(xc->esc_virq[prio]);
240 xc->esc_virq[prio] = 0;
241 kfree(name);
242 return rc;
243}
244
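/*
 * Allocate and configure the event queue page used by a vcpu for a
 * given priority. The queue is sized according to the device-wide
 * q_order and registered with the XIVE native layer.
 */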
245static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
246{
247 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
248 struct kvmppc_xive *xive = xc->xive;
249 struct xive_q *q = &xc->queues[prio];
250 void *qpage;
251 int rc;
252
253 if (WARN_ON(q->qpage))
254 return 0;
255
256
257 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
258 if (!qpage) {
259 pr_err("Failed to allocate queue %d for VCPU %d\n",
260 prio, xc->server_num);
261 return -ENOMEM;
262 }
263 memset(qpage, 0, 1 << xive->q_order);
264
265
266
267
268
269
270
271
272 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
273 xive->q_order, true);
274 if (rc)
275 pr_err("Failed to configure queue %d for VCPU %d\n",
276 prio, xc->server_num);
277 return rc;
278}
279
280
281static int xive_check_provisioning(struct kvm *kvm, u8 prio)
282{
283 struct kvmppc_xive *xive = kvm->arch.xive;
284 struct kvm_vcpu *vcpu;
285 int i, rc;
286
287 lockdep_assert_held(&xive->lock);
288
289
290 if (xive->qmap & (1 << prio))
291 return 0;
292
293 pr_devel("Provisioning prio... %d\n", prio);
294
295
296 kvm_for_each_vcpu(i, vcpu, kvm) {
297 if (!vcpu->arch.xive_vcpu)
298 continue;
299 rc = xive_provision_queue(vcpu, prio);
300 if (rc == 0 && !xive->single_escalation)
301 kvmppc_xive_attach_escalation(vcpu, prio,
302 xive->single_escalation);
303 if (rc)
304 return rc;
305 }
306
307
308 mb();
309 xive->qmap |= (1 << prio);
310 return 0;
311}
312
313static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
314{
315 struct kvm_vcpu *vcpu;
316 struct kvmppc_xive_vcpu *xc;
317 struct xive_q *q;
318
319
320 vcpu = kvmppc_xive_find_server(kvm, server);
321 if (!vcpu) {
322 pr_warn("%s: Can't find server %d\n", __func__, server);
323 return;
324 }
325 xc = vcpu->arch.xive_vcpu;
326 if (WARN_ON(!xc))
327 return;
328
329 q = &xc->queues[prio];
330 atomic_inc(&q->pending_count);
331}
332
333static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
334{
335 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
336 struct xive_q *q;
337 u32 max;
338
339 if (WARN_ON(!xc))
340 return -ENXIO;
341 if (!xc->valid)
342 return -ENXIO;
343
344 q = &xc->queues[prio];
345 if (WARN_ON(!q->qpage))
346 return -ENXIO;
347
348
349 max = (q->msk + 1) - XIVE_Q_GAP;
350 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
351}
352
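/*
 * Pick a target queue for an interrupt: try the requested server
 * first, then fall back to any vcpu whose queue for that priority
 * still has room (see XIVE_Q_GAP). On success, *server is updated
 * to the vcpu actually chosen.
 */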
353int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
354{
355 struct kvm_vcpu *vcpu;
356 int i, rc;
357
358
359 vcpu = kvmppc_xive_find_server(kvm, *server);
360 if (!vcpu) {
361 pr_devel("Can't find server %d\n", *server);
362 return -EINVAL;
363 }
364
365 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
366
367
368 rc = xive_try_pick_queue(vcpu, prio);
369 if (rc == 0)
370 return rc;
371
372 pr_devel(" .. failed, looking up candidate...\n");
373
374
375 kvm_for_each_vcpu(i, vcpu, kvm) {
376 if (!vcpu->arch.xive_vcpu)
377 continue;
378 rc = xive_try_pick_queue(vcpu, prio);
379 if (rc == 0) {
380 *server = vcpu->arch.xive_vcpu->server_num;
381 pr_devel(" found on 0x%x/%d\n", *server, prio);
382 return rc;
383 }
384 }
385 pr_devel(" no available target !\n");
386
387
388 return -EBUSY;
389}
390
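/*
 * Lock the source and mask it at the HW level by setting its ESB to
 * PQ = 10 (or by re-routing it to the masked priority when firmware
 * masking is required). The previous P and Q bits are saved in
 * old_p/old_q so that unmasking can restore the proper state.
 * Returns the previous guest priority.
 */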
391static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
392 struct kvmppc_xive_src_block *sb,
393 struct kvmppc_xive_irq_state *state)
394{
395 struct xive_irq_data *xd;
396 u32 hw_num;
397 u8 old_prio;
398 u64 val;
399
400
401
402
403
404 for (;;) {
405 arch_spin_lock(&sb->lock);
406 old_prio = state->guest_priority;
407 state->guest_priority = MASKED;
408 mb();
409 if (!state->in_eoi)
410 break;
411 state->guest_priority = old_prio;
412 arch_spin_unlock(&sb->lock);
413 }
414
415
416 if (old_prio == MASKED)
417 return old_prio;
418
419
420 kvmppc_xive_select_irq(state, &hw_num, &xd);
421
422
423
424
425
426
427
428
429
430
431
432
433
434 if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
435 xive_native_configure_irq(hw_num,
436 kvmppc_xive_vp(xive, state->act_server),
437 MASKED, state->number);
438
439 state->old_p = true;
440 state->old_q = false;
441 } else {
442
443 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
444 state->old_p = !!(val & 2);
445 state->old_q = !!(val & 1);
446
447
448
449
450
451 xive_native_sync_source(hw_num);
452 }
453
454 return old_prio;
455}
456
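/*
 * Lock the source in preparation for unmasking or re-targetting,
 * waiting for any concurrent H_EOI emulation on it to finish first.
 */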
457static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
458 struct kvmppc_xive_irq_state *state)
459{
460
461
462
463 for (;;) {
464 arch_spin_lock(&sb->lock);
465 if (!state->in_eoi)
466 break;
467 arch_spin_unlock(&sb->lock);
468 }
469}
470
471static void xive_finish_unmask(struct kvmppc_xive *xive,
472 struct kvmppc_xive_src_block *sb,
473 struct kvmppc_xive_irq_state *state,
474 u8 prio)
475{
476 struct xive_irq_data *xd;
477 u32 hw_num;
478
479
480 if (state->guest_priority != MASKED)
481 goto bail;
482
483
484 kvmppc_xive_select_irq(state, &hw_num, &xd);
485
486
487
488
489
490 if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
491 xive_native_configure_irq(hw_num,
492 kvmppc_xive_vp(xive, state->act_server),
493 state->act_priority, state->number);
494
495 if (!state->old_p)
496 xive_vm_source_eoi(hw_num, xd);
497
498 if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
499 xive_irq_trigger(xd);
500 goto bail;
501 }
502
503
504 if (state->old_q)
505 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
506
507
508
509
510
511
512 if (!state->old_p)
513 xive_vm_source_eoi(hw_num, xd);
514
515
516 mb();
517bail:
518 state->guest_priority = prio;
519}

/*
 * Change the target (server/priority) of an interrupt. If it was
 * already targeted somewhere, credit the old queue with a "pending"
 * entry first, since an occurrence may still be sitting there, then
 * re-route the source in the XIVE.
 */
528static int xive_target_interrupt(struct kvm *kvm,
529 struct kvmppc_xive_irq_state *state,
530 u32 server, u8 prio)
531{
532 struct kvmppc_xive *xive = kvm->arch.xive;
533 u32 hw_num;
534 int rc;
535
536
537
538
539
540
541 rc = kvmppc_xive_select_target(kvm, &server, prio);
542
543
544
545
546
547 if (rc)
548 return rc;
549
550
551
552
553
554
555 if (state->act_priority != MASKED)
556 xive_inc_q_pending(kvm,
557 state->act_server,
558 state->act_priority);
559
560
561
562 state->act_priority = prio;
563 state->act_server = server;
564
565
566 kvmppc_xive_select_irq(state, &hw_num, NULL);
567
568 return xive_native_configure_irq(hw_num,
569 kvmppc_xive_vp(xive, server),
570 prio, state->number);
571}
572

/*
 * Targetting rules: in order to avoid losing track of pending
 * interrupts across mask and unmask (which could overflow a queue),
 * this implementation uses the following scheme:
 *
 * - Once it has been targetted, an interrupt stays routed to a valid
 *   server/priority pair in the XIVE even while "masked" by the
 *   guest. Masking is handled purely via the ESB PQ bits, so
 *   accounting only has to be done when (re)targetting.
 *
 * - When masking, PQ is set to 10 and the previous P and Q values
 *   are saved (old_p/old_q).
 *
 * - When unmasking, if the saved Q was set, PQ is forced to 11
 *   (pending), otherwise the HW state is left alone. Then, if the
 *   saved P was clear, an EOI is performed to re-enable the source
 *   and replay anything that was queued while masked.
 *
 * - When changing targets, the new queue gets its count incremented
 *   and the old one gets a "pending" credit, which records that stale
 *   occurrences may still be queued there.
 */
613int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
614 u32 priority)
615{
616 struct kvmppc_xive *xive = kvm->arch.xive;
617 struct kvmppc_xive_src_block *sb;
618 struct kvmppc_xive_irq_state *state;
619 u8 new_act_prio;
620 int rc = 0;
621 u16 idx;
622
623 if (!xive)
624 return -ENODEV;
625
626 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
627 irq, server, priority);
628
629
630 if (priority != MASKED) {
631 mutex_lock(&xive->lock);
632 rc = xive_check_provisioning(xive->kvm,
633 xive_prio_from_guest(priority));
634 mutex_unlock(&xive->lock);
635 }
636 if (rc) {
637 pr_devel(" provisioning failure %d !\n", rc);
638 return rc;
639 }
640
641 sb = kvmppc_xive_find_source(xive, irq, &idx);
642 if (!sb)
643 return -EINVAL;
644 state = &sb->irq_state[idx];
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660 if (priority == MASKED)
661 xive_lock_and_mask(xive, sb, state);
662 else
663 xive_lock_for_unmask(sb, state);
664
665
666
667
668
669
670
671 new_act_prio = state->act_priority;
672 if (priority != MASKED)
673 new_act_prio = xive_prio_from_guest(priority);
674
675 pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
676 new_act_prio, state->act_server, state->act_priority);
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692 if (new_act_prio != MASKED &&
693 (state->act_server != server ||
694 state->act_priority != new_act_prio))
695 rc = xive_target_interrupt(kvm, state, server, new_act_prio);
696
697
698
699
700
701 if (priority != MASKED)
702 xive_finish_unmask(xive, sb, state, priority);
703
704
705
706
707
708 state->saved_priority = priority;
709
710 arch_spin_unlock(&sb->lock);
711 return rc;
712}
713
714int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
715 u32 *priority)
716{
717 struct kvmppc_xive *xive = kvm->arch.xive;
718 struct kvmppc_xive_src_block *sb;
719 struct kvmppc_xive_irq_state *state;
720 u16 idx;
721
722 if (!xive)
723 return -ENODEV;
724
725 sb = kvmppc_xive_find_source(xive, irq, &idx);
726 if (!sb)
727 return -EINVAL;
728 state = &sb->irq_state[idx];
729 arch_spin_lock(&sb->lock);
730 *server = state->act_server;
731 *priority = state->guest_priority;
732 arch_spin_unlock(&sb->lock);
733
734 return 0;
735}
736
737int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
738{
739 struct kvmppc_xive *xive = kvm->arch.xive;
740 struct kvmppc_xive_src_block *sb;
741 struct kvmppc_xive_irq_state *state;
742 u16 idx;
743
744 if (!xive)
745 return -ENODEV;
746
747 sb = kvmppc_xive_find_source(xive, irq, &idx);
748 if (!sb)
749 return -EINVAL;
750 state = &sb->irq_state[idx];
751
752 pr_devel("int_on(irq=0x%x)\n", irq);
753
754
755
756
757 if (state->act_priority == MASKED) {
758 pr_devel("int_on on untargetted interrupt\n");
759 return -EINVAL;
760 }
761
762
763 if (state->saved_priority == MASKED)
764 return 0;
765
766
767
768
769 xive_lock_for_unmask(sb, state);
770 xive_finish_unmask(xive, sb, state, state->saved_priority);
771 arch_spin_unlock(&sb->lock);
772
773 return 0;
774}
775
776int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
777{
778 struct kvmppc_xive *xive = kvm->arch.xive;
779 struct kvmppc_xive_src_block *sb;
780 struct kvmppc_xive_irq_state *state;
781 u16 idx;
782
783 if (!xive)
784 return -ENODEV;
785
786 sb = kvmppc_xive_find_source(xive, irq, &idx);
787 if (!sb)
788 return -EINVAL;
789 state = &sb->irq_state[idx];
790
791 pr_devel("int_off(irq=0x%x)\n", irq);
792
793
794
795
796 state->saved_priority = xive_lock_and_mask(xive, sb, state);
797 arch_spin_unlock(&sb->lock);
798
799 return 0;
800}
801
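/*
 * Re-trigger the internal IPI backing a guest interrupt so that a
 * pending occurrence reported by the source (e.g. on migration
 * restore) is queued again. Returns false if the source isn't valid.
 */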
802static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
803{
804 struct kvmppc_xive_src_block *sb;
805 struct kvmppc_xive_irq_state *state;
806 u16 idx;
807
808 sb = kvmppc_xive_find_source(xive, irq, &idx);
809 if (!sb)
810 return false;
811 state = &sb->irq_state[idx];
812 if (!state->valid)
813 return false;
814
815
816
817
818
819 xive_irq_trigger(&state->ipi_data);
820
821 return true;
822}
823
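/*
 * Return the state of the vcpu's presentation controller in the XICS
 * one-reg format: CPPR and MFRR in their respective fields, and PPRI
 * forced to 0xff (no pending priority) since pending interrupts live
 * in the XIVE queues rather than in an ICP register.
 */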
824u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
825{
826 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
827
828 if (!xc)
829 return 0;
830
831
832 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
833 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
834 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
835}
836
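/*
 * Restore the vcpu's presentation controller from the XICS one-reg
 * format: CPPR is pushed into the saved OS context, MFRR may trigger
 * the vcpu IPI if it is more favored than CPPR, and a pending XISR
 * (other than the IPI) is re-triggered, or remembered as "delayed"
 * if its source hasn't been restored yet.
 */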
837int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
838{
839 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
840 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
841 u8 cppr, mfrr;
842 u32 xisr;
843
844 if (!xc || !xive)
845 return -ENOENT;
846
847
848 cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
849 xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
850 KVM_REG_PPC_ICP_XISR_MASK;
851 mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
852
853 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
854 xc->server_num, cppr, mfrr, xisr);
855
856
857
858
859
860
861 if (WARN_ON(vcpu->arch.xive_pushed))
862 return -EIO;
863
864
865 vcpu->arch.xive_saved_state.cppr = cppr;
866 xc->hw_cppr = xc->cppr = cppr;
867
868
869
870
871
872
873
874 xc->mfrr = mfrr;
875 if (mfrr < cppr)
876 xive_irq_trigger(&xc->vp_ipi_data);
877
878
879
880
881
882
883
884
885
886
887 if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
888 xc->delayed_irq = xisr;
889 xive->delayed_irqs++;
890 pr_devel(" xisr restore delayed\n");
891 }
892
893 return 0;
894}
895
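/*
 * Switch a guest interrupt from the emulated internal IPI to the real
 * HW interrupt of a passed-through device: mask and sync the IPI,
 * route the HW source to the current target, and let it replace the
 * IPI as the backing source for this guest irq number.
 */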
896int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
897 struct irq_desc *host_desc)
898{
899 struct kvmppc_xive *xive = kvm->arch.xive;
900 struct kvmppc_xive_src_block *sb;
901 struct kvmppc_xive_irq_state *state;
902 struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
903 unsigned int host_irq = irq_desc_get_irq(host_desc);
904 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
905 u16 idx;
906 u8 prio;
907 int rc;
908
909 if (!xive)
910 return -ENODEV;
911
	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);
913
914 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
915 if (!sb)
916 return -EINVAL;
917 state = &sb->irq_state[idx];
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932 rc = irq_set_vcpu_affinity(host_irq, state);
933 if (rc) {
934 pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
935 return rc;
936 }
937
938
939
940
941
942
943 prio = xive_lock_and_mask(xive, sb, state);
944 pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
945 state->old_p, state->old_q);
946
947
948 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
949
950
951
952
953
954 if (xive->ops && xive->ops->reset_mapped)
955 xive->ops->reset_mapped(kvm, guest_irq);
956
957
958 state->pt_number = hw_irq;
959 state->pt_data = irq_data_get_irq_handler_data(host_data);
960
961
962
963
964
965
966
967 xive_native_configure_irq(hw_irq,
968 kvmppc_xive_vp(xive, state->act_server),
969 state->act_priority, state->number);
970
971
972
973
974
975
976
977
978 if (prio != MASKED && !state->old_p)
979 xive_vm_source_eoi(hw_irq, state->pt_data);
980
981
982 state->old_p = state->old_q = false;
983
984
985 mb();
986 state->guest_priority = prio;
987 arch_spin_unlock(&sb->lock);
988
989 return 0;
990}
991EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
992
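/*
 * Reverse of kvmppc_xive_set_mapped(): detach the passed-through HW
 * interrupt and route the internal IPI back in its place, restoring
 * the mask state the guest expects.
 */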
993int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
994 struct irq_desc *host_desc)
995{
996 struct kvmppc_xive *xive = kvm->arch.xive;
997 struct kvmppc_xive_src_block *sb;
998 struct kvmppc_xive_irq_state *state;
999 unsigned int host_irq = irq_desc_get_irq(host_desc);
1000 u16 idx;
1001 u8 prio;
1002 int rc;
1003
1004 if (!xive)
1005 return -ENODEV;
1006
1007 pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
1008
1009 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1010 if (!sb)
1011 return -EINVAL;
1012 state = &sb->irq_state[idx];
1013
1014
1015
1016
1017
1018
1019 prio = xive_lock_and_mask(xive, sb, state);
1020 pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
1021 state->old_p, state->old_q);
1022
1023
1024
1025
1026
1027
1028 if (state->old_p)
1029 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
1030
1031
1032 rc = irq_set_vcpu_affinity(host_irq, NULL);
1033 if (rc) {
1034 pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
1035 return rc;
1036 }
1037
1038
1039 state->pt_number = 0;
1040 state->pt_data = NULL;
1041
1042
1043
1044
1045
1046 if (xive->ops && xive->ops->reset_mapped) {
1047 xive->ops->reset_mapped(kvm, guest_irq);
1048 }
1049
1050
1051 xive_native_configure_irq(state->ipi_number,
1052 kvmppc_xive_vp(xive, state->act_server),
1053 state->act_priority, state->number);
1054
1055
1056
1057
1058
1059
1060 if (prio == MASKED || state->old_p)
1061 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
1062 else
1063 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
1064
1065
1066 mb();
1067 state->guest_priority = prio;
1068 arch_spin_unlock(&sb->lock);
1069
1070 return 0;
1071}
1072EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
1073
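/*
 * Mask and un-route in the XIVE every source currently targeted at
 * this vcpu, and quiesce its escalation interrupt. Called when the
 * vcpu's XIVE context is being torn down.
 */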
1074void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
1075{
1076 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1077 struct kvm *kvm = vcpu->kvm;
1078 struct kvmppc_xive *xive = kvm->arch.xive;
1079 int i, j;
1080
1081 for (i = 0; i <= xive->max_sbid; i++) {
1082 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1083
1084 if (!sb)
1085 continue;
1086 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
1087 struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
1088
1089 if (!state->valid)
1090 continue;
1091 if (state->act_priority == MASKED)
1092 continue;
1093 if (state->act_server != xc->server_num)
1094 continue;
1095
1096
1097 arch_spin_lock(&sb->lock);
1098 state->act_priority = MASKED;
1099 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1100 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
1101 if (state->pt_number) {
1102 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
1103 xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
1104 }
1105 arch_spin_unlock(&sb->lock);
1106 }
1107 }
1108
1109
1110 if (vcpu->arch.xive_esc_on) {
1111 __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
1112 XIVE_ESB_SET_PQ_01));
1113 vcpu->arch.xive_esc_on = false;
1114 }
1115
1116
1117
1118
1119
1120
1121 vcpu->arch.xive_esc_vaddr = 0;
1122 vcpu->arch.xive_esc_raddr = 0;
1123}
1124

/*
 * In single escalation mode the escalation interrupt is flagged so
 * that an EOI does not re-enable it but only records, via stale_p,
 * that its P bit has already been dealt with. The guest entry path,
 * however, may set PQ back to 00 without updating stale_p, so it has
 * to be fixed up here before the interrupt is torn down.
 */
void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
				    struct kvmppc_xive_vcpu *xc, int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * This slightly odd sequence gives the right result
	 * (i.e. stale_p set if xive_esc_irq was racing with us)
	 * in all cases.
	 */
	xd->stale_p = false;
	smp_mb();		/* paired with smp_wmb() in xive_esc_irq() */
	if (!vcpu->arch.xive_esc_on)
		xd->stale_p = true;
}
1149
1150void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1151{
1152 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1153 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1154 int i;
1155
1156 if (!kvmppc_xics_enabled(vcpu))
1157 return;
1158
1159 if (!xc)
1160 return;
1161
1162 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1163
1164
1165 xc->valid = false;
1166 kvmppc_xive_disable_vcpu_interrupts(vcpu);
1167
1168
1169 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1170
1171
1172 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1173 if (xc->esc_virq[i]) {
1174 if (xc->xive->single_escalation)
1175 xive_cleanup_single_escalation(vcpu, xc,
1176 xc->esc_virq[i]);
1177 free_irq(xc->esc_virq[i], vcpu);
1178 irq_dispose_mapping(xc->esc_virq[i]);
1179 kfree(xc->esc_virq_names[i]);
1180 }
1181 }
1182
1183
1184 xive_native_disable_vp(xc->vp_id);
1185
1186
1187 vcpu->arch.xive_cam_word = 0;
1188
1189
1190 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1191 struct xive_q *q = &xc->queues[i];
1192
1193 xive_native_disable_queue(xc->vp_id, q, i);
1194 if (q->qpage) {
1195 free_pages((unsigned long)q->qpage,
1196 xive->q_page_order);
1197 q->qpage = NULL;
1198 }
1199 }
1200
1201
1202 if (xc->vp_ipi) {
1203 xive_cleanup_irq_data(&xc->vp_ipi_data);
1204 xive_native_free_irq(xc->vp_ipi);
1205 }
1206
1207 kfree(xc);
1208
1209
1210 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1211 vcpu->arch.xive_vcpu = NULL;
1212}
1213
1214static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
1215{
1216
1217
1218
1219
1220
1221
1222 return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode;
1223}
1224
1225int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
1226{
1227 u32 vp_id;
1228
1229 if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
1230 pr_devel("Out of bounds !\n");
1231 return -EINVAL;
1232 }
1233
1234 if (xive->vp_base == XIVE_INVALID_VP) {
1235 xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
1236 pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);
1237
1238 if (xive->vp_base == XIVE_INVALID_VP)
1239 return -ENOSPC;
1240 }
1241
1242 vp_id = kvmppc_xive_vp(xive, cpu);
1243 if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
1244 pr_devel("Duplicate !\n");
1245 return -EEXIST;
1246 }
1247
1248 *vp = vp_id;
1249
1250 return 0;
1251}
1252
1253int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
1254 struct kvm_vcpu *vcpu, u32 cpu)
1255{
1256 struct kvmppc_xive *xive = dev->private;
1257 struct kvmppc_xive_vcpu *xc;
1258 int i, r = -EBUSY;
1259 u32 vp_id;
1260
1261 pr_devel("connect_vcpu(cpu=%d)\n", cpu);
1262
1263 if (dev->ops != &kvm_xive_ops) {
1264 pr_devel("Wrong ops !\n");
1265 return -EPERM;
1266 }
1267 if (xive->kvm != vcpu->kvm)
1268 return -EPERM;
1269 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
1270 return -EBUSY;
1271
1272
1273 mutex_lock(&xive->lock);
1274
1275 r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
1276 if (r)
1277 goto bail;
1278
1279 xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1280 if (!xc) {
1281 r = -ENOMEM;
1282 goto bail;
1283 }
1284
1285 vcpu->arch.xive_vcpu = xc;
1286 xc->xive = xive;
1287 xc->vcpu = vcpu;
1288 xc->server_num = cpu;
1289 xc->vp_id = vp_id;
1290 xc->mfrr = 0xff;
1291 xc->valid = true;
1292
1293 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1294 if (r)
1295 goto bail;
1296
1297
1298 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
1299 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1300
1301
1302 xc->vp_ipi = xive_native_alloc_irq();
1303 if (!xc->vp_ipi) {
1304 pr_err("Failed to allocate xive irq for VCPU IPI\n");
1305 r = -EIO;
1306 goto bail;
1307 }
1308 pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1309
1310 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1311 if (r)
1312 goto bail;
1313
1314
1315
1316
1317
1318 r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
1319 if (r) {
1320 pr_err("Failed to enable VP in OPAL, err %d\n", r);
1321 goto bail;
1322 }
1323
1324
1325
1326
1327
1328
1329
1330
1331 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1332 struct xive_q *q = &xc->queues[i];
1333
1334
1335 if (i == 7 && xive->single_escalation)
1336 break;
1337
1338
1339 if (xive->qmap & (1 << i)) {
1340 r = xive_provision_queue(vcpu, i);
1341 if (r == 0 && !xive->single_escalation)
1342 kvmppc_xive_attach_escalation(
1343 vcpu, i, xive->single_escalation);
1344 if (r)
1345 goto bail;
1346 } else {
1347 r = xive_native_configure_queue(xc->vp_id,
1348 q, i, NULL, 0, true);
1349 if (r) {
1350 pr_err("Failed to configure queue %d for VCPU %d\n",
1351 i, cpu);
1352 goto bail;
1353 }
1354 }
1355 }
1356
1357
1358 r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
1359 if (r)
1360 goto bail;
1361
1362
1363 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
1364 if (!r)
1365 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
1366
1367bail:
1368 mutex_unlock(&xive->lock);
1369 if (r) {
1370 kvmppc_xive_cleanup_vcpu(vcpu);
1371 return r;
1372 }
1373
1374 vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
1375 return 0;
1376}
1377
1378
1379
1380
1381static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
1382{
1383 struct kvmppc_xive_src_block *sb;
1384 struct kvmppc_xive_irq_state *state;
1385 u16 idx;
1386
1387 sb = kvmppc_xive_find_source(xive, irq, &idx);
1388 if (!sb)
1389 return;
1390
1391 state = &sb->irq_state[idx];
1392
1393
1394 if (!state->valid) {
1395 pr_err("invalid irq 0x%x in cpu queue!\n", irq);
1396 return;
1397 }
1398
1399
1400
1401
1402
1403
1404 if (!state->saved_p)
1405 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
1406
1407
1408 state->in_queue = true;
1409}
1410
1411static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
1412 struct kvmppc_xive_src_block *sb,
1413 u32 irq)
1414{
1415 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1416
1417 if (!state->valid)
1418 return;
1419
1420
1421 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
1422
1423
1424 state->saved_p = state->old_p;
1425 state->saved_q = state->old_q;
1426
1427
1428 arch_spin_unlock(&sb->lock);
1429}
1430
1431static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
1432 struct kvmppc_xive_src_block *sb,
1433 u32 irq)
1434{
1435 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1436
1437 if (!state->valid)
1438 return;
1439
1440
1441
1442
1443
1444
1445 xive_lock_for_unmask(sb, state);
1446
1447
1448 if (state->saved_scan_prio != MASKED)
1449 xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
1450
1451
1452 arch_spin_unlock(&sb->lock);
1453}
1454
1455static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
1456{
1457 u32 idx = q->idx;
1458 u32 toggle = q->toggle;
1459 u32 irq;
1460
1461 do {
1462 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
1463 if (irq > XICS_IPI)
1464 xive_pre_save_set_queued(xive, irq);
	} while (irq);
1466}
1467
1468static void xive_pre_save_scan(struct kvmppc_xive *xive)
1469{
1470 struct kvm_vcpu *vcpu = NULL;
1471 int i, j;
1472
1473
1474
1475
1476
1477 for (i = 0; i <= xive->max_sbid; i++) {
1478 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1479 if (!sb)
1480 continue;
1481 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1482 xive_pre_save_mask_irq(xive, sb, j);
1483 }
1484
1485
1486 kvm_for_each_vcpu(i, vcpu, xive->kvm) {
1487 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1488 if (!xc)
1489 continue;
1490 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
1491 if (xc->queues[j].qpage)
1492 xive_pre_save_queue(xive, &xc->queues[j]);
1493 }
1494 }
1495
1496
1497 for (i = 0; i <= xive->max_sbid; i++) {
1498 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1499 if (!sb)
1500 continue;
1501 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1502 xive_pre_save_unmask_irq(xive, sb, j);
1503 }
1504}
1505
1506static void xive_post_save_scan(struct kvmppc_xive *xive)
1507{
1508 u32 i, j;
1509
1510
1511 for (i = 0; i <= xive->max_sbid; i++) {
1512 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1513 if (!sb)
1514 continue;
1515 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1516 sb->irq_state[j].in_queue = false;
1517 }
1518
1519
1520 xive->saved_src_count = 0;
1521}
1522
1523
1524
1525
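/*
 * Save one interrupt source for migration: returns a 64-bit word with
 * the target server in the low bits, the guest priority, and the
 * KVM_XICS_* state flags (MASKED, LEVEL_SENSITIVE, PENDING, PRESENTED,
 * QUEUED). The first call of a save sequence masks and scans all
 * sources and queues to capture a consistent snapshot; the last call
 * resets the bookkeeping so a new save pass can start.
 */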
1526static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
1527{
1528 struct kvmppc_xive_src_block *sb;
1529 struct kvmppc_xive_irq_state *state;
1530 u64 __user *ubufp = (u64 __user *) addr;
1531 u64 val, prio;
1532 u16 idx;
1533
1534 sb = kvmppc_xive_find_source(xive, irq, &idx);
1535 if (!sb)
1536 return -ENOENT;
1537
1538 state = &sb->irq_state[idx];
1539
1540 if (!state->valid)
1541 return -ENOENT;
1542
1543 pr_devel("get_source(%ld)...\n", irq);
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561 if (xive->saved_src_count == 0)
1562 xive_pre_save_scan(xive);
1563 xive->saved_src_count++;
1564
1565
1566 val = state->act_server;
1567 prio = state->saved_scan_prio;
1568
1569 if (prio == MASKED) {
1570 val |= KVM_XICS_MASKED;
1571 prio = state->saved_priority;
1572 }
1573 val |= prio << KVM_XICS_PRIORITY_SHIFT;
1574 if (state->lsi) {
1575 val |= KVM_XICS_LEVEL_SENSITIVE;
1576 if (state->saved_p)
1577 val |= KVM_XICS_PENDING;
1578 } else {
1579 if (state->saved_p)
1580 val |= KVM_XICS_PRESENTED;
1581
1582 if (state->saved_q)
1583 val |= KVM_XICS_QUEUED;
1584
1585
1586
1587
1588
1589
1590
1591 if (state->in_queue || (prio == MASKED && state->saved_q))
1592 val |= KVM_XICS_PENDING;
1593 }
1594
1595
1596
1597
1598
1599 if (xive->saved_src_count == xive->src_count)
1600 xive_post_save_scan(xive);
1601
1602
1603 if (put_user(val, ubufp))
1604 return -EFAULT;
1605
1606 return 0;
1607}
1608
1609struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
1610 struct kvmppc_xive *xive, int irq)
1611{
1612 struct kvmppc_xive_src_block *sb;
1613 int i, bid;
1614
1615 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
1616
1617 mutex_lock(&xive->lock);
1618
1619
1620 if (xive->src_blocks[bid])
1621 goto out;
1622
1623
1624 sb = kzalloc(sizeof(*sb), GFP_KERNEL);
1625 if (!sb)
1626 goto out;
1627
1628 sb->id = bid;
1629
1630 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1631 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
1632 sb->irq_state[i].eisn = 0;
1633 sb->irq_state[i].guest_priority = MASKED;
1634 sb->irq_state[i].saved_priority = MASKED;
1635 sb->irq_state[i].act_priority = MASKED;
1636 }
1637 smp_wmb();
1638 xive->src_blocks[bid] = sb;
1639
1640 if (bid > xive->max_sbid)
1641 xive->max_sbid = bid;
1642
1643out:
1644 mutex_unlock(&xive->lock);
1645 return xive->src_blocks[bid];
1646}
1647
1648static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
1649{
1650 struct kvm *kvm = xive->kvm;
1651 struct kvm_vcpu *vcpu = NULL;
1652 int i;
1653
1654 kvm_for_each_vcpu(i, vcpu, kvm) {
1655 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1656
1657 if (!xc)
1658 continue;
1659
1660 if (xc->delayed_irq == irq) {
1661 xc->delayed_irq = 0;
1662 xive->delayed_irqs--;
1663 return true;
1664 }
1665 }
1666 return false;
1667}
1668
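/*
 * Restore one interrupt source from migration (or create it on first
 * use): allocate the backing internal IPI if needed, mask the source,
 * re-target it according to the saved server/priority, restore the
 * P/Q and level-asserted state, then unmask it unless it was saved
 * as masked.
 */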
1669static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1670{
1671 struct kvmppc_xive_src_block *sb;
1672 struct kvmppc_xive_irq_state *state;
1673 u64 __user *ubufp = (u64 __user *) addr;
1674 u16 idx;
1675 u64 val;
1676 u8 act_prio, guest_prio;
1677 u32 server;
1678 int rc = 0;
1679
1680 if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1681 return -ENOENT;
1682
1683 pr_devel("set_source(irq=0x%lx)\n", irq);
1684
1685
1686 sb = kvmppc_xive_find_source(xive, irq, &idx);
1687 if (!sb) {
1688 pr_devel("No source, creating source block...\n");
1689 sb = kvmppc_xive_create_src_block(xive, irq);
1690 if (!sb) {
1691 pr_devel("Failed to create block...\n");
1692 return -ENOMEM;
1693 }
1694 }
1695 state = &sb->irq_state[idx];
1696
1697
1698 if (get_user(val, ubufp)) {
1699 pr_devel("fault getting user info !\n");
1700 return -EFAULT;
1701 }
1702
1703 server = val & KVM_XICS_DESTINATION_MASK;
1704 guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
1705
	pr_devel("  val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);
1708
1709
1710
1711
1712
1713 if (!state->ipi_number) {
1714 state->ipi_number = xive_native_alloc_irq();
1715 if (state->ipi_number == 0) {
1716 pr_devel("Failed to allocate IPI !\n");
1717 return -ENOMEM;
1718 }
1719 xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
1720 pr_devel(" src_ipi=0x%x\n", state->ipi_number);
1721 }
1722
1723
1724
1725
1726
1727
1728
1729
1730 state->guest_priority = 0;
1731 xive_lock_and_mask(xive, sb, state);
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741 act_prio = xive_prio_from_guest(guest_prio);
1742 state->act_priority = MASKED;
1743
1744
1745
1746
1747
1748
1749 arch_spin_unlock(&sb->lock);
1750
1751
1752 if (act_prio != MASKED) {
1753
1754 mutex_lock(&xive->lock);
1755 rc = xive_check_provisioning(xive->kvm, act_prio);
1756 mutex_unlock(&xive->lock);
1757
1758
1759 if (rc == 0)
1760 rc = xive_target_interrupt(xive->kvm, state,
1761 server, act_prio);
1762
1763
1764
1765
1766
1767 }
1768
1769
1770
1771
1772
1773 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
1774 val |= KVM_XICS_PENDING;
1775 pr_devel(" Found delayed ! forcing PENDING !\n");
1776 }
1777
1778
1779 state->old_p = false;
1780 state->old_q = false;
1781 state->lsi = false;
1782 state->asserted = false;
1783
1784
1785 if (val & KVM_XICS_LEVEL_SENSITIVE) {
1786 state->lsi = true;
1787 if (val & KVM_XICS_PENDING)
1788 state->asserted = true;
1789 pr_devel(" LSI ! Asserted=%d\n", state->asserted);
1790 }
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802 if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
1803 state->old_p = true;
1804 if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
1805 state->old_q = true;
1806
1807 pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
1808
1809
1810
1811
1812
1813
1814 if (val & KVM_XICS_MASKED) {
1815 pr_devel(" masked, saving prio\n");
1816 state->guest_priority = MASKED;
1817 state->saved_priority = guest_prio;
1818 } else {
1819 pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
1820 xive_finish_unmask(xive, sb, state, guest_prio);
1821 state->saved_priority = guest_prio;
1822 }
1823
1824
1825 if (!state->valid)
1826 xive->src_count++;
1827 state->valid = true;
1828
1829 return 0;
1830}
1831
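/*
 * Inject a guest interrupt from the irq routing path by firing its
 * backing internal IPI. Passed-through (mapped) sources are rejected
 * here since the HW device raises those directly.
 */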
1832int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1833 bool line_status)
1834{
1835 struct kvmppc_xive *xive = kvm->arch.xive;
1836 struct kvmppc_xive_src_block *sb;
1837 struct kvmppc_xive_irq_state *state;
1838 u16 idx;
1839
1840 if (!xive)
1841 return -ENODEV;
1842
1843 sb = kvmppc_xive_find_source(xive, irq, &idx);
1844 if (!sb)
1845 return -EINVAL;
1846
1847
1848 state = &sb->irq_state[idx];
1849 if (!state->valid)
1850 return -EINVAL;
1851
1852
1853 if (state->pt_number)
1854 return -EINVAL;
1855
1856 if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
1857 state->asserted = 1;
1858 else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
1859 state->asserted = 0;
1860 return 0;
1861 }
1862
1863
1864 xive_irq_trigger(&state->ipi_data);
1865
1866 return 0;
1867}
1868
1869int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
1870{
1871 u32 __user *ubufp = (u32 __user *) addr;
1872 u32 nr_servers;
1873 int rc = 0;
1874
1875 if (get_user(nr_servers, ubufp))
1876 return -EFAULT;
1877
1878 pr_devel("%s nr_servers=%u\n", __func__, nr_servers);
1879
1880 if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID)
1881 return -EINVAL;
1882
1883 mutex_lock(&xive->lock);
1884 if (xive->vp_base != XIVE_INVALID_VP)
1885
1886
1887
1888
1889
1890
1891
1892 rc = -EBUSY;
1893 else if (nr_servers > KVM_MAX_VCPUS)
1894
1895
1896
1897 xive->nr_servers = KVM_MAX_VCPUS;
1898 else
1899 xive->nr_servers = nr_servers;
1900
1901 mutex_unlock(&xive->lock);
1902
1903 return rc;
1904}
1905
1906static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1907{
1908 struct kvmppc_xive *xive = dev->private;
1909
1910
1911 switch (attr->group) {
1912 case KVM_DEV_XICS_GRP_SOURCES:
1913 return xive_set_source(xive, attr->attr, attr->addr);
1914 case KVM_DEV_XICS_GRP_CTRL:
1915 switch (attr->attr) {
1916 case KVM_DEV_XICS_NR_SERVERS:
1917 return kvmppc_xive_set_nr_servers(xive, attr->addr);
1918 }
1919 }
1920 return -ENXIO;
1921}
1922
1923static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1924{
1925 struct kvmppc_xive *xive = dev->private;
1926
1927
1928 switch (attr->group) {
1929 case KVM_DEV_XICS_GRP_SOURCES:
1930 return xive_get_source(xive, attr->attr, attr->addr);
1931 }
1932 return -ENXIO;
1933}
1934
1935static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1936{
1937
1938 switch (attr->group) {
1939 case KVM_DEV_XICS_GRP_SOURCES:
1940 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
1941 attr->attr < KVMPPC_XICS_NR_IRQS)
1942 return 0;
1943 break;
1944 case KVM_DEV_XICS_GRP_CTRL:
1945 switch (attr->attr) {
1946 case KVM_DEV_XICS_NR_SERVERS:
1947 return 0;
1948 }
1949 }
1950 return -ENXIO;
1951}
1952
1953static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
1954{
1955 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
1956 xive_native_configure_irq(hw_num, 0, MASKED, 0);
1957}
1958
1959void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
1960{
1961 int i;
1962
1963 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1964 struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
1965
1966 if (!state->valid)
1967 continue;
1968
1969 kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
1970 xive_cleanup_irq_data(&state->ipi_data);
1971 xive_native_free_irq(state->ipi_number);
1972
1973
1974 if (state->pt_number)
1975 kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
1976
1977 state->valid = false;
1978 }
1979}
1980

/*
 * Called when the device fd is closed: tears down every vCPU's XIVE
 * context and all sources before the device structure is dropped.
 */
1984static void kvmppc_xive_release(struct kvm_device *dev)
1985{
1986 struct kvmppc_xive *xive = dev->private;
1987 struct kvm *kvm = xive->kvm;
1988 struct kvm_vcpu *vcpu;
1989 int i;
1990
1991 pr_devel("Releasing xive device\n");
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002 debugfs_remove(xive->dentry);
2003
2004
2005
2006
2007 kvm_for_each_vcpu(i, vcpu, kvm) {
2008
2009
2010
2011
2012
2013
2014
2015
2016 mutex_lock(&vcpu->mutex);
2017 kvmppc_xive_cleanup_vcpu(vcpu);
2018 mutex_unlock(&vcpu->mutex);
2019 }
2020
2021
2022
2023
2024
2025
2026
2027 kvm->arch.xive = NULL;
2028
2029
2030 for (i = 0; i <= xive->max_sbid; i++) {
2031 if (xive->src_blocks[i])
2032 kvmppc_xive_free_sources(xive->src_blocks[i]);
2033 kfree(xive->src_blocks[i]);
2034 xive->src_blocks[i] = NULL;
2035 }
2036
2037 if (xive->vp_base != XIVE_INVALID_VP)
2038 xive_native_free_vp_block(xive->vp_base);
2039
2040
2041
2042
2043
2044
2045
2046
2047 kfree(dev);
2048}
2049

/*
 * The kvmppc_xive structure is recycled when the guest switches
 * interrupt mode (XICS-on-XIVE vs. XIVE native): the previous device
 * is released but its state structure is kept around, zeroed and
 * reused by the next create rather than being freed outright.
 */
2059struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
2060{
2061 struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
2062 &kvm->arch.xive_devices.native :
2063 &kvm->arch.xive_devices.xics_on_xive;
2064 struct kvmppc_xive *xive = *kvm_xive_device;
2065
2066 if (!xive) {
2067 xive = kzalloc(sizeof(*xive), GFP_KERNEL);
2068 *kvm_xive_device = xive;
2069 } else {
2070 memset(xive, 0, sizeof(*xive));
2071 }
2072
2073 return xive;
2074}
2075

/*
 * Create a XICS device with a XIVE backend.
 */
2079static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
2080{
2081 struct kvmppc_xive *xive;
2082 struct kvm *kvm = dev->kvm;
2083
2084 pr_devel("Creating xive for partition\n");
2085
2086
2087 if (kvm->arch.xive)
2088 return -EEXIST;
2089
2090 xive = kvmppc_xive_get_device(kvm, type);
2091 if (!xive)
2092 return -ENOMEM;
2093
2094 dev->private = xive;
2095 xive->dev = dev;
2096 xive->kvm = kvm;
2097 mutex_init(&xive->lock);
2098
2099
2100 xive->q_order = xive_native_default_eq_shift();
2101 if (xive->q_order < PAGE_SHIFT)
2102 xive->q_page_order = 0;
2103 else
2104 xive->q_page_order = xive->q_order - PAGE_SHIFT;
2105
2106
2107 xive->vp_base = XIVE_INVALID_VP;
2108
2109
2110
2111 xive->nr_servers = KVM_MAX_VCPUS;
2112
2113 xive->single_escalation = xive_native_has_single_escalation();
2114
2115 kvm->arch.xive = xive;
2116 return 0;
2117}
2118
2119int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
2120{
2121 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2122 unsigned int i;
2123
2124 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
2125 struct xive_q *q = &xc->queues[i];
2126 u32 i0, i1, idx;
2127
2128 if (!q->qpage && !xc->esc_virq[i])
2129 continue;
2130
2131 seq_printf(m, " [q%d]: ", i);
2132
2133 if (q->qpage) {
2134 idx = q->idx;
2135 i0 = be32_to_cpup(q->qpage + idx);
2136 idx = (idx + 1) & q->msk;
2137 i1 = be32_to_cpup(q->qpage + idx);
2138 seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
2139 i0, i1);
2140 }
2141 if (xc->esc_virq[i]) {
2142 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
2143 struct xive_irq_data *xd =
2144 irq_data_get_irq_handler_data(d);
2145 u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2146
2147 seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
2148 (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
2149 (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
2150 xc->esc_virq[i], pq, xd->eoi_page);
2151 seq_puts(m, "\n");
2152 }
2153 }
2154 return 0;
2155}
2156
2157static int xive_debug_show(struct seq_file *m, void *private)
2158{
2159 struct kvmppc_xive *xive = m->private;
2160 struct kvm *kvm = xive->kvm;
2161 struct kvm_vcpu *vcpu;
2162 u64 t_rm_h_xirr = 0;
2163 u64 t_rm_h_ipoll = 0;
2164 u64 t_rm_h_cppr = 0;
2165 u64 t_rm_h_eoi = 0;
2166 u64 t_rm_h_ipi = 0;
2167 u64 t_vm_h_xirr = 0;
2168 u64 t_vm_h_ipoll = 0;
2169 u64 t_vm_h_cppr = 0;
2170 u64 t_vm_h_eoi = 0;
2171 u64 t_vm_h_ipi = 0;
2172 unsigned int i;
2173
2174 if (!kvm)
2175 return 0;
2176
2177 seq_printf(m, "=========\nVCPU state\n=========\n");
2178
2179 kvm_for_each_vcpu(i, vcpu, kvm) {
2180 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2181
2182 if (!xc)
2183 continue;
2184
2185 seq_printf(m, "cpu server %#x VP:%#x CPPR:%#x HWCPPR:%#x"
2186 " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
2187 xc->server_num, xc->vp_id, xc->cppr, xc->hw_cppr,
2188 xc->mfrr, xc->pending,
2189 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
2190
2191 kvmppc_xive_debug_show_queues(m, vcpu);
2192
2193 t_rm_h_xirr += xc->stat_rm_h_xirr;
2194 t_rm_h_ipoll += xc->stat_rm_h_ipoll;
2195 t_rm_h_cppr += xc->stat_rm_h_cppr;
2196 t_rm_h_eoi += xc->stat_rm_h_eoi;
2197 t_rm_h_ipi += xc->stat_rm_h_ipi;
2198 t_vm_h_xirr += xc->stat_vm_h_xirr;
2199 t_vm_h_ipoll += xc->stat_vm_h_ipoll;
2200 t_vm_h_cppr += xc->stat_vm_h_cppr;
2201 t_vm_h_eoi += xc->stat_vm_h_eoi;
2202 t_vm_h_ipi += xc->stat_vm_h_ipi;
2203 }
2204
2205 seq_printf(m, "Hcalls totals\n");
2206 seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
2207 seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
2208 seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
2209 seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
2210 seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
2211
2212 return 0;
2213}
2214
2215DEFINE_SHOW_ATTRIBUTE(xive_debug);
2216
2217static void xive_debugfs_init(struct kvmppc_xive *xive)
2218{
2219 char *name;
2220
2221 name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
2222 if (!name) {
2223 pr_err("%s: no memory for name\n", __func__);
2224 return;
2225 }
2226
2227 xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
2228 xive, &xive_debug_fops);
2229
2230 pr_debug("%s: created %s\n", __func__, name);
2231 kfree(name);
2232}
2233
2234static void kvmppc_xive_init(struct kvm_device *dev)
2235{
2236 struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
2237
2238
2239 xive_debugfs_init(xive);
2240}
2241
2242struct kvm_device_ops kvm_xive_ops = {
2243 .name = "kvm-xive",
2244 .create = kvmppc_xive_create,
2245 .init = kvmppc_xive_init,
2246 .release = kvmppc_xive_release,
2247 .set_attr = xive_set_attr,
2248 .get_attr = xive_get_attr,
2249 .has_attr = xive_has_attr,
2250};
2251
2252void kvmppc_xive_init_module(void)
2253{
2254 __xive_vm_h_xirr = xive_vm_h_xirr;
2255 __xive_vm_h_ipoll = xive_vm_h_ipoll;
2256 __xive_vm_h_ipi = xive_vm_h_ipi;
2257 __xive_vm_h_cppr = xive_vm_h_cppr;
2258 __xive_vm_h_eoi = xive_vm_h_eoi;
2259}
2260
2261void kvmppc_xive_exit_module(void)
2262{
2263 __xive_vm_h_xirr = NULL;
2264 __xive_vm_h_ipoll = NULL;
2265 __xive_vm_h_ipi = NULL;
2266 __xive_vm_h_cppr = NULL;
2267 __xive_vm_h_eoi = NULL;
2268}
2269