1
2
3
4
5
6#define pr_fmt(fmt) "xive-kvm: " fmt
7
8#include <linux/kernel.h>
9#include <linux/kvm_host.h>
10#include <linux/err.h>
11#include <linux/gfp.h>
12#include <linux/spinlock.h>
13#include <linux/delay.h>
14#include <linux/percpu.h>
15#include <linux/cpumask.h>
16#include <linux/uaccess.h>
17#include <linux/irqdomain.h>
18#include <asm/kvm_book3s.h>
19#include <asm/kvm_ppc.h>
20#include <asm/hvcall.h>
21#include <asm/xics.h>
22#include <asm/xive.h>
23#include <asm/xive-regs.h>
24#include <asm/debug.h>
25#include <asm/time.h>
26#include <asm/opal.h>
27
28#include <linux/debugfs.h>
29#include <linux/seq_file.h>
30
31#include "book3s_xive.h"
32
33#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
34#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
35
36
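/*
 * Dummy interrupt value written into a queue entry to invalidate it,
 * for instance when the interrupt has been re-routed to another vCPU;
 * the queue scan code skips such entries.
 */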
37#define XICS_DUMMY 1
38
39static void xive_vm_ack_pending(struct kvmppc_xive_vcpu *xc)
40{
41 u8 cppr;
42 u16 ack;
43
44
45
46
47
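 /*
  * Ensure any previous store to the CPPR is ordered before the
  * load of the ACK register from the thread management area below.
  */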
48 eieio();
49
50
51 ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));
52
53
54 mb();
55
56
57
58
59 if (!((ack >> 8) & TM_QW1_NSR_EO))
60 return;
61
62
63 cppr = ack & 0xff;
64 if (cppr < 8)
65 xc->pending |= 1 << cppr;
66
67
68 if (cppr >= xc->hw_cppr)
69 pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
70 smp_processor_id(), cppr, xc->hw_cppr);
71
72
73
74
75
76
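 /*
  * Update our shadow of the HW CPPR. xc->cppr itself is only
  * adjusted later, when the queues are scanned.
  */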
77 xc->hw_cppr = cppr;
78}
79
80static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
81{
82 u64 val;
83
84 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
85 offset |= XIVE_ESB_LD_ST_MO;
86
87 val = __raw_readq(__x_eoi_page(xd) + offset);
88#ifdef __LITTLE_ENDIAN__
89 val >>= 64-8;
90#endif
91 return (u8)val;
92}
93
94
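/*
 * EOI an interrupt at the source, using whichever mechanism its ESB
 * provides: a write to the "store EOI" page, a load from the LSI EOI
 * page, or (for MSIs without store EOI) clearing the PQ bits and
 * re-triggering if an occurrence was queued in the meantime.
 */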
95static void xive_vm_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
96{
97
98 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
99 __raw_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
100 else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
101
102
103
104
105
106 __raw_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
107 } else {
108 uint64_t eoi_val;
109
110
111
112
113
114
115
116
117
118
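 /*
  * MSI without store EOI: reset the PQ bits to 00. If the Q bit
  * was set, the interrupt fired again while it was held pending,
  * so forward that occurrence by writing the trigger page.
  */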
119 eoi_val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_00);
120
121
122 if ((eoi_val & 1) && __x_trig_page(xd))
123 __raw_writeq(0, __x_trig_page(xd));
124 }
125}
126
127enum {
128 scan_fetch,
129 scan_poll,
130 scan_eoi,
131};
132
133static u32 xive_vm_scan_interrupts(struct kvmppc_xive_vcpu *xc,
134 u8 pending, int scan_type)
135{
136 u32 hirq = 0;
137 u8 prio = 0xff;
138
139
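 /*
  * Scan the queues from the most favored (lowest) pending priority,
  * stopping when an interrupt is found, when no pending priority is
  * more favored than the CPPR, or when an IPI signalled through the
  * MFRR takes precedence.
  */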
140 while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
141 struct xive_q *q;
142 u32 idx, toggle;
143 __be32 *qpage;
144
145
146
147
148
149 prio = ffs(pending) - 1;
150
151
152 if (prio >= xc->cppr || prio > 7) {
153 if (xc->mfrr < xc->cppr) {
154 prio = xc->mfrr;
155 hirq = XICS_IPI;
156 }
157 break;
158 }
159
160
161 q = &xc->queues[prio];
162 idx = q->idx;
163 toggle = q->toggle;
164
165
166
167
168
169
170
171 qpage = READ_ONCE(q->qpage);
172
173skip_ipi:
174
175
176
177
178 hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
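 /*
  * An internal IPI found in the queue (or a priority 0 scan with
  * no queue page) is handled here: in "fetch" mode, EOI the vCPU
  * IPI source and commit the queue pointers, then keep scanning
  * the same queue in case a real interrupt sits behind it. The
  * IPI itself is signalled through the MFRR handling below.
  */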
194 if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
195 if (scan_type == scan_fetch) {
196 xive_vm_source_eoi(xc->vp_ipi,
197 &xc->vp_ipi_data);
198 q->idx = idx;
199 q->toggle = toggle;
200 }
201
202 WARN_ON(hirq && hirq != XICS_IPI);
203 if (hirq)
204 goto skip_ipi;
205 }
206
207
208 if (hirq == XICS_DUMMY)
209 goto skip_ipi;
210
211
212 if (!hirq) {
213 pending &= ~(1 << prio);
214
215
216
217
218
219 if (atomic_read(&q->pending_count)) {
220 int p = atomic_xchg(&q->pending_count, 0);
221
222 if (p) {
223 WARN_ON(p > atomic_read(&q->count));
224 atomic_sub(p, &q->count);
225 }
226 }
227 }
228
229
230
231
232
233
234 if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
235 prio = xc->mfrr;
236 hirq = XICS_IPI;
237 break;
238 }
239
240
241 if (scan_type == scan_fetch) {
242 q->idx = idx;
243 q->toggle = toggle;
244 }
245 }
246
247
248 if (scan_type == scan_poll)
249 return hirq;
250
251
252 xc->pending = pending;
253
254
255
256
257
258
259 if (scan_type == scan_eoi)
260 return hirq;
261
262
263
264
265
266
267
268
269
270
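 /*
  * If we found an interrupt, set our CPPR to its priority, acting
  * as if the guest had already acknowledged it via the XIRR read.
  */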
271 if (hirq)
272 xc->cppr = prio;
273
274
275
276
277
278
279 if (xc->cppr != xc->hw_cppr) {
280 xc->hw_cppr = xc->cppr;
281 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR);
282 }
283
284 return hirq;
285}
286
287static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu)
288{
289 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
290 u8 old_cppr;
291 u32 hirq;
292
293 pr_devel("H_XIRR\n");
294
295 xc->stat_vm_h_xirr++;
296
297
298 xive_vm_ack_pending(xc);
299
300 pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
301 xc->pending, xc->hw_cppr, xc->cppr);
302
303
304 old_cppr = xive_prio_to_guest(xc->cppr);
305
306
307 hirq = xive_vm_scan_interrupts(xc, xc->pending, scan_fetch);
308
309 pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
310 hirq, xc->hw_cppr, xc->cppr);
311
312
313 if (hirq & 0xff000000)
314 pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
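 /*
  * Return the XIRR to the guest in GPR4: interrupt number in the
  * low 24 bits, previous CPPR in the top byte.
  */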
331 vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
332
333 return H_SUCCESS;
334}
335
336static unsigned long xive_vm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
337{
338 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
339 u8 pending = xc->pending;
340 u32 hirq;
341
342 pr_devel("H_IPOLL(server=%ld)\n", server);
343
344 xc->stat_vm_h_ipoll++;
345
346
347 if (xc->server_num != server) {
348 vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
349 if (!vcpu)
350 return H_PARAMETER;
351 xc = vcpu->arch.xive_vcpu;
352
353
354 pending = 0xff;
355 } else {
356
357 __be64 qw1 = __raw_readq(xive_tima + TM_QW1_OS);
358 u8 pipr = be64_to_cpu(qw1) & 0xff;
359
360 if (pipr < 8)
361 pending |= 1 << pipr;
362 }
363
364 hirq = xive_vm_scan_interrupts(xc, pending, scan_poll);
365
366
367 vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
368
369 return H_SUCCESS;
370}
371
372static void xive_vm_push_pending_to_hw(struct kvmppc_xive_vcpu *xc)
373{
374 u8 pending, prio;
375
376 pending = xc->pending;
377 if (xc->mfrr != 0xff) {
378 if (xc->mfrr < 8)
379 pending |= 1 << xc->mfrr;
380 else
381 pending |= 0x80;
382 }
383 if (!pending)
384 return;
385 prio = ffs(pending) - 1;
386
387 __raw_writeb(prio, xive_tima + TM_SPC_SET_OS_PENDING);
388}
389
390static void xive_vm_scan_for_rerouted_irqs(struct kvmppc_xive *xive,
391 struct kvmppc_xive_vcpu *xc)
392{
393 unsigned int prio;
394
395
396 for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
397 struct xive_q *q = &xc->queues[prio];
398 struct kvmppc_xive_irq_state *state;
399 struct kvmppc_xive_src_block *sb;
400 u32 idx, toggle, entry, irq, hw_num;
401 struct xive_irq_data *xd;
402 __be32 *qpage;
403 u16 src;
404
405 idx = q->idx;
406 toggle = q->toggle;
407 qpage = READ_ONCE(q->qpage);
408 if (!qpage)
409 continue;
410
411
412 for (;;) {
413 entry = be32_to_cpup(qpage + idx);
414
415
416 if ((entry >> 31) == toggle)
417 break;
418 irq = entry & 0x7fffffff;
419
420
421 if (irq == XICS_DUMMY || irq == XICS_IPI)
422 goto next;
423 sb = kvmppc_xive_find_source(xive, irq, &src);
424 if (!sb)
425 goto next;
426 state = &sb->irq_state[src];
427
428
429 if (xc->server_num == state->act_server)
430 goto next;
431
432
433
434
435
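 /*
  * The entry has been re-routed to another server: invalidate it
  * in place (keep the generation bit, replace the number with the
  * dummy marker) and EOI the source so the interrupt is presented
  * again to its new target.
  */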
436 qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);
437
438
439 kvmppc_xive_select_irq(state, &hw_num, &xd);
440
441
442 if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
443 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
444
445
446 xive_vm_source_eoi(hw_num, xd);
447
448next:
449 idx = (idx + 1) & q->msk;
450 if (idx == 0)
451 toggle ^= 1;
452 }
453 }
454}
455
456static int xive_vm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
457{
458 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
459 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
460 u8 old_cppr;
461
462 pr_devel("H_CPPR(cppr=%ld)\n", cppr);
463
464 xc->stat_vm_h_cppr++;
465
466
467 cppr = xive_prio_from_guest(cppr);
468
469
470 old_cppr = xc->cppr;
471 xc->cppr = cppr;
472
473
474
475
476
477 smp_mb();
478
479 if (cppr > old_cppr) {
480
481
482
483
484
485
486 xive_vm_push_pending_to_hw(xc);
487 } else {
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
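 /*
  * We are masking more: interrupts sitting in one of our queues
  * that have since been re-routed to another vCPU might otherwise
  * never be processed (at least not until this CPPR is restored),
  * so hunt them down, invalidate their entries and re-trigger them.
  */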
505 xive_vm_scan_for_rerouted_irqs(xive, xc);
506 }
507
508
509 xc->hw_cppr = cppr;
510 __raw_writeb(cppr, xive_tima + TM_QW1_OS + TM_CPPR);
511
512 return H_SUCCESS;
513}
514
515static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
516{
517 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
518 struct kvmppc_xive_src_block *sb;
519 struct kvmppc_xive_irq_state *state;
520 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
521 struct xive_irq_data *xd;
522 u8 new_cppr = xirr >> 24;
523 u32 irq = xirr & 0x00ffffff, hw_num;
524 u16 src;
525 int rc = 0;
526
527 pr_devel("H_EOI(xirr=%08lx)\n", xirr);
528
529 xc->stat_vm_h_eoi++;
530
531 xc->cppr = xive_prio_from_guest(new_cppr);
532
533
534
535
536
537
538
539 if (irq == XICS_IPI || irq == 0) {
540
541
542
543
544
545 smp_mb();
546 goto bail;
547 }
548
549
550 sb = kvmppc_xive_find_source(xive, irq, &src);
551 if (!sb) {
552 pr_devel(" source not found !\n");
553 rc = H_PARAMETER;
554
555 smp_mb();
556 goto bail;
557 }
558 state = &sb->irq_state[src];
559 kvmppc_xive_select_irq(state, &hw_num, &xd);
560
561 state->in_eoi = true;
562
563
564
565
566
567
568
569 smp_mb();
570
571again:
572 if (state->guest_priority == MASKED) {
573 arch_spin_lock(&sb->lock);
574 if (state->guest_priority != MASKED) {
575 arch_spin_unlock(&sb->lock);
576 goto again;
577 }
578 pr_devel(" EOI on saved P...\n");
579
580
581 state->old_p = false;
582
583 arch_spin_unlock(&sb->lock);
584 } else {
585 pr_devel(" EOI on source...\n");
586
587
588 xive_vm_source_eoi(hw_num, xd);
589
590
591 if (state->lsi && state->asserted)
592 __raw_writeq(0, __x_trig_page(xd));
593
594 }
595
596
597
598
599
600
601
602
603
604 mb();
605 state->in_eoi = false;
606bail:
607
608
609 xive_vm_scan_interrupts(xc, xc->pending, scan_eoi);
610 xive_vm_push_pending_to_hw(xc);
611 pr_devel(" after scan pending=%02x\n", xc->pending);
612
613
614 xc->hw_cppr = xc->cppr;
615 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR);
616
617 return rc;
618}
619
620static int xive_vm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
621 unsigned long mfrr)
622{
623 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
624
625 pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);
626
627 xc->stat_vm_h_ipi++;
628
629
630 vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
631 if (!vcpu)
632 return H_PARAMETER;
633 xc = vcpu->arch.xive_vcpu;
634
635
636 xc->mfrr = mfrr;
637
638
639
640
641
642
643
644
645
646
647
648 mb();
649
650
651 if (mfrr < xc->cppr)
652 __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data));
653
654 return H_SUCCESS;
655}
656
657
658
659
660
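/*
 * A couple of queue entries are kept free as a safety margin when
 * deciding whether a queue can accept another interrupt (see
 * xive_try_pick_queue).
 */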
661#define XIVE_Q_GAP 2
662
663static bool kvmppc_xive_vcpu_has_save_restore(struct kvm_vcpu *vcpu)
664{
665 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
666
667
668 return xc->vp_cam & TM_QW1W2_HO;
669}
670
671bool kvmppc_xive_check_save_restore(struct kvm_vcpu *vcpu)
672{
673 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
674 struct kvmppc_xive *xive = xc->xive;
675
676 if (xive->flags & KVMPPC_XIVE_FLAG_SAVE_RESTORE)
677 return kvmppc_xive_vcpu_has_save_restore(vcpu);
678
679 return true;
680}
681
682
683
684
685
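/*
 * Push a vCPU's XIVE OS context into the thread management area on
 * entry to the guest.
 */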
686void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
687{
688 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
689 u64 pq;
690
691
692
693
694
695
696 if (!tima || !vcpu->arch.xive_cam_word)
697 return;
698
699 eieio();
700 if (!kvmppc_xive_vcpu_has_save_restore(vcpu))
701 __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
702 __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
703 vcpu->arch.xive_pushed = 1;
704 eieio();
705
706
707
708
709
710
711
712
713 vcpu->arch.irq_pending = 0;
714
715
716
717
718
719 if (vcpu->arch.xive_esc_on) {
720 pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
721 XIVE_ESB_SET_PQ_01));
722 mb();
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745 if (!(pq & XIVE_ESB_VAL_P))
746
747 vcpu->arch.xive_esc_on = 0;
748 }
749}
750EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
751
752
753
754
755
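/*
 * Pull the vCPU's XIVE OS context back out of the thread management
 * area on exit from the guest.
 */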
756void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
757{
758 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
759
760 if (!vcpu->arch.xive_pushed)
761 return;
762
763
764
765
766 if (WARN_ON(!tima))
767 return;
768
769 eieio();
770
771 __raw_readl(tima + TM_SPC_PULL_OS_CTX);
772
773 if (!kvmppc_xive_vcpu_has_save_restore(vcpu))
774 vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);
775
776
777 vcpu->arch.xive_saved_state.lsmfb = 0;
778 vcpu->arch.xive_saved_state.ack = 0xff;
779 vcpu->arch.xive_pushed = 0;
780 eieio();
781}
782EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
783
784bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
785{
786 void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
787 bool ret = true;
788
789 if (!esc_vaddr)
790 return ret;
791
792
793
794 if (vcpu->arch.xive_esc_on) {
795
796
797
798
799
800
801
802
803 ret = false;
804
805
806
807
808
809 __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
810 } else {
811 vcpu->arch.xive_esc_on = true;
812 mb();
813 __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
814 }
815 mb();
816
817 return ret;
818}
819EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
820
821
822
823
824
825static bool xive_irq_trigger(struct xive_irq_data *xd)
826{
827
828 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
829 return false;
830
831
832 if (WARN_ON(!xd->trig_mmio))
833 return false;
834
835 out_be64(xd->trig_mmio, 0);
836
837 return true;
838}
839
840static irqreturn_t xive_esc_irq(int irq, void *data)
841{
842 struct kvm_vcpu *vcpu = data;
843
844 vcpu->arch.irq_pending = 1;
845 smp_mb();
846 if (vcpu->arch.ceded || vcpu->arch.nested)
847 kvmppc_fast_vcpu_kick(vcpu);
848
849
850
851
852
853
854
855
856
857
858 vcpu->arch.xive_esc_on = false;
859
860
861 smp_wmb();
862
863 return IRQ_HANDLED;
864}
865
866int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
867 bool single_escalation)
868{
869 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
870 struct xive_q *q = &xc->queues[prio];
871 char *name = NULL;
872 int rc;
873
874
875 if (xc->esc_virq[prio])
876 return 0;
877
878
879 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
880 if (!xc->esc_virq[prio]) {
881 pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
882 prio, xc->server_num);
883 return -EIO;
884 }
885
886 if (single_escalation)
887 name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
888 vcpu->kvm->arch.lpid, xc->server_num);
889 else
890 name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
891 vcpu->kvm->arch.lpid, xc->server_num, prio);
892 if (!name) {
893 pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
894 prio, xc->server_num);
895 rc = -ENOMEM;
896 goto error;
897 }
898
899 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
900
901 rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
902 IRQF_NO_THREAD, name, vcpu);
903 if (rc) {
904 pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
905 prio, xc->server_num);
906 goto error;
907 }
908 xc->esc_virq_names[prio] = name;
909
910
911
912
913
914
915
916
917
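 /*
  * In single escalation mode the escalation interrupt is managed by
  * hand from the guest entry/exit code: remember its ESB pages
  * (xive_esc_raddr/xive_esc_vaddr), set its PQ bits to 01 and flag
  * it with XIVE_IRQ_FLAG_NO_EOI so the generic EOI path leaves it
  * alone.
  */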
918 if (single_escalation) {
919 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
920 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
921
922 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
923 vcpu->arch.xive_esc_raddr = xd->eoi_page;
924 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
925 xd->flags |= XIVE_IRQ_FLAG_NO_EOI;
926 }
927
928 return 0;
929error:
930 irq_dispose_mapping(xc->esc_virq[prio]);
931 xc->esc_virq[prio] = 0;
932 kfree(name);
933 return rc;
934}
935
936static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
937{
938 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
939 struct kvmppc_xive *xive = xc->xive;
940 struct xive_q *q = &xc->queues[prio];
941 void *qpage;
942 int rc;
943
944 if (WARN_ON(q->qpage))
945 return 0;
946
947
948 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
949 if (!qpage) {
950 pr_err("Failed to allocate queue %d for VCPU %d\n",
951 prio, xc->server_num);
952 return -ENOMEM;
953 }
954 memset(qpage, 0, 1 << xive->q_order);
955
956
957
958
959
960
961
962
963 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
964 xive->q_order, true);
965 if (rc)
966 pr_err("Failed to configure queue %d for VCPU %d\n",
967 prio, xc->server_num);
968 return rc;
969}
970
971
972static int xive_check_provisioning(struct kvm *kvm, u8 prio)
973{
974 struct kvmppc_xive *xive = kvm->arch.xive;
975 struct kvm_vcpu *vcpu;
976 unsigned long i;
977 int rc;
978
979 lockdep_assert_held(&xive->lock);
980
981
982 if (xive->qmap & (1 << prio))
983 return 0;
984
985 pr_devel("Provisioning prio... %d\n", prio);
986
987
988 kvm_for_each_vcpu(i, vcpu, kvm) {
989 if (!vcpu->arch.xive_vcpu)
990 continue;
991 rc = xive_provision_queue(vcpu, prio);
992 if (rc == 0 && !kvmppc_xive_has_single_escalation(xive))
993 kvmppc_xive_attach_escalation(vcpu, prio,
994 kvmppc_xive_has_single_escalation(xive));
995 if (rc)
996 return rc;
997 }
998
999
1000 mb();
1001 xive->qmap |= (1 << prio);
1002 return 0;
1003}
1004
1005static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
1006{
1007 struct kvm_vcpu *vcpu;
1008 struct kvmppc_xive_vcpu *xc;
1009 struct xive_q *q;
1010
1011
1012 vcpu = kvmppc_xive_find_server(kvm, server);
1013 if (!vcpu) {
1014 pr_warn("%s: Can't find server %d\n", __func__, server);
1015 return;
1016 }
1017 xc = vcpu->arch.xive_vcpu;
1018 if (WARN_ON(!xc))
1019 return;
1020
1021 q = &xc->queues[prio];
1022 atomic_inc(&q->pending_count);
1023}
1024
1025static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
1026{
1027 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1028 struct xive_q *q;
1029 u32 max;
1030
1031 if (WARN_ON(!xc))
1032 return -ENXIO;
1033 if (!xc->valid)
1034 return -ENXIO;
1035
1036 q = &xc->queues[prio];
1037 if (WARN_ON(!q->qpage))
1038 return -ENXIO;
1039
1040
1041 max = (q->msk + 1) - XIVE_Q_GAP;
1042 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
1043}
1044
1045int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
1046{
1047 struct kvm_vcpu *vcpu;
1048 unsigned long i;
1049 int rc;
1050
1051
1052 vcpu = kvmppc_xive_find_server(kvm, *server);
1053 if (!vcpu) {
1054 pr_devel("Can't find server %d\n", *server);
1055 return -EINVAL;
1056 }
1057
1058 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
1059
1060
1061 rc = xive_try_pick_queue(vcpu, prio);
1062 if (rc == 0)
1063 return rc;
1064
1065 pr_devel(" .. failed, looking up candidate...\n");
1066
1067
1068 kvm_for_each_vcpu(i, vcpu, kvm) {
1069 if (!vcpu->arch.xive_vcpu)
1070 continue;
1071 rc = xive_try_pick_queue(vcpu, prio);
1072 if (rc == 0) {
1073 *server = vcpu->arch.xive_vcpu->server_num;
1074 pr_devel(" found on 0x%x/%d\n", *server, prio);
1075 return rc;
1076 }
1077 }
1078 pr_devel(" no available target !\n");
1079
1080
1081 return -EBUSY;
1082}
1083
1084static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
1085 struct kvmppc_xive_src_block *sb,
1086 struct kvmppc_xive_irq_state *state)
1087{
1088 struct xive_irq_data *xd;
1089 u32 hw_num;
1090 u8 old_prio;
1091 u64 val;
1092
1093
1094
1095
1096
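 /*
  * Take the lock and mask the source, but back off and retry if an
  * EOI is concurrently in progress on it (in_eoi), since the EOI
  * path runs unlocked.
  */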
1097 for (;;) {
1098 arch_spin_lock(&sb->lock);
1099 old_prio = state->guest_priority;
1100 state->guest_priority = MASKED;
1101 mb();
1102 if (!state->in_eoi)
1103 break;
1104 state->guest_priority = old_prio;
1105 arch_spin_unlock(&sb->lock);
1106 }
1107
1108
1109 if (old_prio == MASKED)
1110 return old_prio;
1111
1112
1113 kvmppc_xive_select_irq(state, &hw_num, &xd);
1114
1115
1116 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
1117 state->old_p = !!(val & 2);
1118 state->old_q = !!(val & 1);
1119
1120
1121
1122
1123
1124 xive_native_sync_source(hw_num);
1125
1126 return old_prio;
1127}
1128
1129static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
1130 struct kvmppc_xive_irq_state *state)
1131{
1132
1133
1134
1135 for (;;) {
1136 arch_spin_lock(&sb->lock);
1137 if (!state->in_eoi)
1138 break;
1139 arch_spin_unlock(&sb->lock);
1140 }
1141}
1142
1143static void xive_finish_unmask(struct kvmppc_xive *xive,
1144 struct kvmppc_xive_src_block *sb,
1145 struct kvmppc_xive_irq_state *state,
1146 u8 prio)
1147{
1148 struct xive_irq_data *xd;
1149 u32 hw_num;
1150
1151
1152 if (state->guest_priority != MASKED)
1153 goto bail;
1154
1155
1156 kvmppc_xive_select_irq(state, &hw_num, &xd);
1157
1158
1159 if (state->old_q)
1160 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
1161
1162
1163
1164
1165
1166
1167 if (!state->old_p)
1168 xive_vm_source_eoi(hw_num, xd);
1169
1170
1171 mb();
1172bail:
1173 state->guest_priority = prio;
1174}
1175
1176
1177
1178
1179
1180
1181
1182
1183static int xive_target_interrupt(struct kvm *kvm,
1184 struct kvmppc_xive_irq_state *state,
1185 u32 server, u8 prio)
1186{
1187 struct kvmppc_xive *xive = kvm->arch.xive;
1188 u32 hw_num;
1189 int rc;
1190
1191
1192
1193
1194
1195
1196 rc = kvmppc_xive_select_target(kvm, &server, prio);
1197
1198
1199
1200
1201
1202 if (rc)
1203 return rc;
1204
1205
1206
1207
1208
1209
1210 if (state->act_priority != MASKED)
1211 xive_inc_q_pending(kvm,
1212 state->act_server,
1213 state->act_priority);
1214
1215
1216
1217 state->act_priority = prio;
1218 state->act_server = server;
1219
1220
1221 kvmppc_xive_select_irq(state, &hw_num, NULL);
1222
1223 return xive_native_configure_irq(hw_num,
1224 kvmppc_xive_vp(xive, server),
1225 prio, state->number);
1226}
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
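/*
 * Set the guest routing (server, priority) of an interrupt. Masking is
 * handled lazily: a masked source stays routed to its last target with
 * its ESB set to PQ=10, and the previous P/Q state is restored when it
 * is unmasked again. The interrupt is only re-targeted when the server
 * or the effective priority actually changes.
 */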
1268int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
1269 u32 priority)
1270{
1271 struct kvmppc_xive *xive = kvm->arch.xive;
1272 struct kvmppc_xive_src_block *sb;
1273 struct kvmppc_xive_irq_state *state;
1274 u8 new_act_prio;
1275 int rc = 0;
1276 u16 idx;
1277
1278 if (!xive)
1279 return -ENODEV;
1280
1281 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
1282 irq, server, priority);
1283
1284
1285 if (priority != MASKED) {
1286 mutex_lock(&xive->lock);
1287 rc = xive_check_provisioning(xive->kvm,
1288 xive_prio_from_guest(priority));
1289 mutex_unlock(&xive->lock);
1290 }
1291 if (rc) {
1292 pr_devel(" provisioning failure %d !\n", rc);
1293 return rc;
1294 }
1295
1296 sb = kvmppc_xive_find_source(xive, irq, &idx);
1297 if (!sb)
1298 return -EINVAL;
1299 state = &sb->irq_state[idx];
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315 if (priority == MASKED)
1316 xive_lock_and_mask(xive, sb, state);
1317 else
1318 xive_lock_for_unmask(sb, state);
1319
1320
1321
1322
1323
1324
1325
1326 new_act_prio = state->act_priority;
1327 if (priority != MASKED)
1328 new_act_prio = xive_prio_from_guest(priority);
1329
1330 pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
1331 new_act_prio, state->act_server, state->act_priority);
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347 if (new_act_prio != MASKED &&
1348 (state->act_server != server ||
1349 state->act_priority != new_act_prio))
1350 rc = xive_target_interrupt(kvm, state, server, new_act_prio);
1351
1352
1353
1354
1355
1356 if (priority != MASKED)
1357 xive_finish_unmask(xive, sb, state, priority);
1358
1359
1360
1361
1362
1363 state->saved_priority = priority;
1364
1365 arch_spin_unlock(&sb->lock);
1366 return rc;
1367}
1368
1369int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
1370 u32 *priority)
1371{
1372 struct kvmppc_xive *xive = kvm->arch.xive;
1373 struct kvmppc_xive_src_block *sb;
1374 struct kvmppc_xive_irq_state *state;
1375 u16 idx;
1376
1377 if (!xive)
1378 return -ENODEV;
1379
1380 sb = kvmppc_xive_find_source(xive, irq, &idx);
1381 if (!sb)
1382 return -EINVAL;
1383 state = &sb->irq_state[idx];
1384 arch_spin_lock(&sb->lock);
1385 *server = state->act_server;
1386 *priority = state->guest_priority;
1387 arch_spin_unlock(&sb->lock);
1388
1389 return 0;
1390}
1391
1392int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
1393{
1394 struct kvmppc_xive *xive = kvm->arch.xive;
1395 struct kvmppc_xive_src_block *sb;
1396 struct kvmppc_xive_irq_state *state;
1397 u16 idx;
1398
1399 if (!xive)
1400 return -ENODEV;
1401
1402 sb = kvmppc_xive_find_source(xive, irq, &idx);
1403 if (!sb)
1404 return -EINVAL;
1405 state = &sb->irq_state[idx];
1406
1407 pr_devel("int_on(irq=0x%x)\n", irq);
1408
1409
1410
1411
1412 if (state->act_priority == MASKED) {
1413 pr_devel("int_on on untargetted interrupt\n");
1414 return -EINVAL;
1415 }
1416
1417
1418 if (state->saved_priority == MASKED)
1419 return 0;
1420
1421
1422
1423
1424 xive_lock_for_unmask(sb, state);
1425 xive_finish_unmask(xive, sb, state, state->saved_priority);
1426 arch_spin_unlock(&sb->lock);
1427
1428 return 0;
1429}
1430
1431int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
1432{
1433 struct kvmppc_xive *xive = kvm->arch.xive;
1434 struct kvmppc_xive_src_block *sb;
1435 struct kvmppc_xive_irq_state *state;
1436 u16 idx;
1437
1438 if (!xive)
1439 return -ENODEV;
1440
1441 sb = kvmppc_xive_find_source(xive, irq, &idx);
1442 if (!sb)
1443 return -EINVAL;
1444 state = &sb->irq_state[idx];
1445
1446 pr_devel("int_off(irq=0x%x)\n", irq);
1447
1448
1449
1450
1451 state->saved_priority = xive_lock_and_mask(xive, sb, state);
1452 arch_spin_unlock(&sb->lock);
1453
1454 return 0;
1455}
1456
1457static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
1458{
1459 struct kvmppc_xive_src_block *sb;
1460 struct kvmppc_xive_irq_state *state;
1461 u16 idx;
1462
1463 sb = kvmppc_xive_find_source(xive, irq, &idx);
1464 if (!sb)
1465 return false;
1466 state = &sb->irq_state[idx];
1467 if (!state->valid)
1468 return false;
1469
1470
1471
1472
1473
1474 xive_irq_trigger(&state->ipi_data);
1475
1476 return true;
1477}
1478
1479u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
1480{
1481 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1482
1483 if (!xc)
1484 return 0;
1485
1486
1487 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
1488 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
1489 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
1490}
1491
1492int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
1493{
1494 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1495 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1496 u8 cppr, mfrr;
1497 u32 xisr;
1498
1499 if (!xc || !xive)
1500 return -ENOENT;
1501
1502
1503 cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
1504 xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
1505 KVM_REG_PPC_ICP_XISR_MASK;
1506 mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
1507
1508 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
1509 xc->server_num, cppr, mfrr, xisr);
1510
1511
1512
1513
1514
1515
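 /*
  * We cannot update the ICP state of a vCPU whose XIVE context is
  * currently pushed into the hardware; the vCPU is not expected to
  * be running when this is called.
  */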
1516 if (WARN_ON(vcpu->arch.xive_pushed))
1517 return -EIO;
1518
1519
1520 vcpu->arch.xive_saved_state.cppr = cppr;
1521 xc->hw_cppr = xc->cppr = cppr;
1522
1523
1524
1525
1526
1527
1528
1529 xc->mfrr = mfrr;
1530 if (mfrr < cppr)
1531 xive_irq_trigger(&xc->vp_ipi_data);
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542 if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
1543 xc->delayed_irq = xisr;
1544 xive->delayed_irqs++;
1545 pr_devel(" xisr restore delayed\n");
1546 }
1547
1548 return 0;
1549}
1550
1551int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
1552 unsigned long host_irq)
1553{
1554 struct kvmppc_xive *xive = kvm->arch.xive;
1555 struct kvmppc_xive_src_block *sb;
1556 struct kvmppc_xive_irq_state *state;
1557 struct irq_data *host_data =
1558 irq_domain_get_irq_data(irq_get_default_host(), host_irq);
1559 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
1560 u16 idx;
1561 u8 prio;
1562 int rc;
1563
1564 if (!xive)
1565 return -ENODEV;
1566
1567 pr_debug("%s: GIRQ 0x%lx host IRQ %ld XIVE HW IRQ 0x%x\n",
1568 __func__, guest_irq, host_irq, hw_irq);
1569
1570 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1571 if (!sb)
1572 return -EINVAL;
1573 state = &sb->irq_state[idx];
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
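 /*
  * Tell the host XIVE driver that this interrupt now targets a guest
  * vCPU by attaching our state structure as its vcpu affinity cookie.
  * The rest of this function then swaps the guest-visible source from
  * the internal IPI over to the real HW interrupt while it is masked.
  */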
1588 rc = irq_set_vcpu_affinity(host_irq, state);
1589 if (rc) {
1590 pr_err("Failed to set VCPU affinity for host IRQ %ld\n", host_irq);
1591 return rc;
1592 }
1593
1594
1595
1596
1597
1598
1599 prio = xive_lock_and_mask(xive, sb, state);
1600 pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
1601 state->old_p, state->old_q);
1602
1603
1604 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1605
1606
1607
1608
1609
1610 if (xive->ops && xive->ops->reset_mapped)
1611 xive->ops->reset_mapped(kvm, guest_irq);
1612
1613
1614 state->pt_number = hw_irq;
1615 state->pt_data = irq_data_get_irq_handler_data(host_data);
1616
1617
1618
1619
1620
1621
1622
1623 xive_native_configure_irq(hw_irq,
1624 kvmppc_xive_vp(xive, state->act_server),
1625 state->act_priority, state->number);
1626
1627
1628
1629
1630
1631
1632
1633
1634 if (prio != MASKED && !state->old_p)
1635 xive_vm_source_eoi(hw_irq, state->pt_data);
1636
1637
1638 state->old_p = state->old_q = false;
1639
1640
1641 mb();
1642 state->guest_priority = prio;
1643 arch_spin_unlock(&sb->lock);
1644
1645 return 0;
1646}
1647EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
1648
1649int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
1650 unsigned long host_irq)
1651{
1652 struct kvmppc_xive *xive = kvm->arch.xive;
1653 struct kvmppc_xive_src_block *sb;
1654 struct kvmppc_xive_irq_state *state;
1655 u16 idx;
1656 u8 prio;
1657 int rc;
1658
1659 if (!xive)
1660 return -ENODEV;
1661
1662 pr_debug("%s: GIRQ 0x%lx host IRQ %ld\n", __func__, guest_irq, host_irq);
1663
1664 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1665 if (!sb)
1666 return -EINVAL;
1667 state = &sb->irq_state[idx];
1668
1669
1670
1671
1672
1673
1674 prio = xive_lock_and_mask(xive, sb, state);
1675 pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
1676 state->old_p, state->old_q);
1677
1678
1679
1680
1681
1682
1683 if (state->old_p)
1684 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
1685
1686
1687 rc = irq_set_vcpu_affinity(host_irq, NULL);
1688 if (rc) {
1689 pr_err("Failed to clr VCPU affinity for host IRQ %ld\n", host_irq);
1690 return rc;
1691 }
1692
1693
1694 state->pt_number = 0;
1695 state->pt_data = NULL;
1696
1697
1698
1699
1700
1701 if (xive->ops && xive->ops->reset_mapped) {
1702 xive->ops->reset_mapped(kvm, guest_irq);
1703 }
1704
1705
1706 xive_native_configure_irq(state->ipi_number,
1707 kvmppc_xive_vp(xive, state->act_server),
1708 state->act_priority, state->number);
1709
1710
1711
1712
1713
1714
1715 if (prio == MASKED || state->old_p)
1716 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
1717 else
1718 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
1719
1720
1721 mb();
1722 state->guest_priority = prio;
1723 arch_spin_unlock(&sb->lock);
1724
1725 return 0;
1726}
1727EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
1728
1729void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
1730{
1731 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1732 struct kvm *kvm = vcpu->kvm;
1733 struct kvmppc_xive *xive = kvm->arch.xive;
1734 int i, j;
1735
1736 for (i = 0; i <= xive->max_sbid; i++) {
1737 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1738
1739 if (!sb)
1740 continue;
1741 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
1742 struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
1743
1744 if (!state->valid)
1745 continue;
1746 if (state->act_priority == MASKED)
1747 continue;
1748 if (state->act_server != xc->server_num)
1749 continue;
1750
1751
1752 arch_spin_lock(&sb->lock);
1753 state->act_priority = MASKED;
1754 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1755 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
1756 if (state->pt_number) {
1757 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
1758 xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
1759 }
1760 arch_spin_unlock(&sb->lock);
1761 }
1762 }
1763
1764
1765 if (vcpu->arch.xive_esc_on) {
1766 __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
1767 XIVE_ESB_SET_PQ_01));
1768 vcpu->arch.xive_esc_on = false;
1769 }
1770
1771
1772
1773
1774
1775
1776 vcpu->arch.xive_esc_vaddr = 0;
1777 vcpu->arch.xive_esc_raddr = 0;
1778}
1779
1780
1781
1782
1783
1784
1785
1786
1787
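/*
 * In single escalation mode the escalation interrupt is not EOIed by
 * the generic code; a stale_p flag records that its P bit may still be
 * set. The guest entry path can clear PQ without updating stale_p, so
 * bring stale_p back in line with xive_esc_on before the interrupt is
 * freed, letting the generic shutdown code do the right thing.
 */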
1788void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
1789 struct kvmppc_xive_vcpu *xc, int irq)
1790{
1791 struct irq_data *d = irq_get_irq_data(irq);
1792 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1793
1794
1795
1796
1797
1798
1799 xd->stale_p = false;
1800 smp_mb();
1801 if (!vcpu->arch.xive_esc_on)
1802 xd->stale_p = true;
1803}
1804
1805void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1806{
1807 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1808 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1809 int i;
1810
1811 if (!kvmppc_xics_enabled(vcpu))
1812 return;
1813
1814 if (!xc)
1815 return;
1816
1817 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1818
1819
1820 xc->valid = false;
1821 kvmppc_xive_disable_vcpu_interrupts(vcpu);
1822
1823
1824 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1825
1826
1827 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1828 if (xc->esc_virq[i]) {
1829 if (kvmppc_xive_has_single_escalation(xc->xive))
1830 xive_cleanup_single_escalation(vcpu, xc,
1831 xc->esc_virq[i]);
1832 free_irq(xc->esc_virq[i], vcpu);
1833 irq_dispose_mapping(xc->esc_virq[i]);
1834 kfree(xc->esc_virq_names[i]);
1835 }
1836 }
1837
1838
1839 xive_native_disable_vp(xc->vp_id);
1840
1841
1842 vcpu->arch.xive_cam_word = 0;
1843
1844
1845 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1846 struct xive_q *q = &xc->queues[i];
1847
1848 xive_native_disable_queue(xc->vp_id, q, i);
1849 if (q->qpage) {
1850 free_pages((unsigned long)q->qpage,
1851 xive->q_page_order);
1852 q->qpage = NULL;
1853 }
1854 }
1855
1856
1857 if (xc->vp_ipi) {
1858 xive_cleanup_irq_data(&xc->vp_ipi_data);
1859 xive_native_free_irq(xc->vp_ipi);
1860 }
1861
1862 kfree(xc);
1863
1864
1865 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1866 vcpu->arch.xive_vcpu = NULL;
1867}
1868
1869static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
1870{
1871
1872
1873
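 /*
  * The packed vCPU id must fit within the number of servers (and
  * hence the VP block) provisioned for this VM.
  */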
1874 return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
1875}
1876
1877int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
1878{
1879 u32 vp_id;
1880
1881 if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
1882 pr_devel("Out of bounds !\n");
1883 return -EINVAL;
1884 }
1885
1886 if (xive->vp_base == XIVE_INVALID_VP) {
1887 xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
1888 pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);
1889
1890 if (xive->vp_base == XIVE_INVALID_VP)
1891 return -ENOSPC;
1892 }
1893
1894 vp_id = kvmppc_xive_vp(xive, cpu);
1895 if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
1896 pr_devel("Duplicate !\n");
1897 return -EEXIST;
1898 }
1899
1900 *vp = vp_id;
1901
1902 return 0;
1903}
1904
1905int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
1906 struct kvm_vcpu *vcpu, u32 cpu)
1907{
1908 struct kvmppc_xive *xive = dev->private;
1909 struct kvmppc_xive_vcpu *xc;
1910 int i, r = -EBUSY;
1911 u32 vp_id;
1912
1913 pr_devel("connect_vcpu(cpu=%d)\n", cpu);
1914
1915 if (dev->ops != &kvm_xive_ops) {
1916 pr_devel("Wrong ops !\n");
1917 return -EPERM;
1918 }
1919 if (xive->kvm != vcpu->kvm)
1920 return -EPERM;
1921 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
1922 return -EBUSY;
1923
1924
1925 mutex_lock(&xive->lock);
1926
1927 r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
1928 if (r)
1929 goto bail;
1930
1931 xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1932 if (!xc) {
1933 r = -ENOMEM;
1934 goto bail;
1935 }
1936
1937 vcpu->arch.xive_vcpu = xc;
1938 xc->xive = xive;
1939 xc->vcpu = vcpu;
1940 xc->server_num = cpu;
1941 xc->vp_id = vp_id;
1942 xc->mfrr = 0xff;
1943 xc->valid = true;
1944
1945 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1946 if (r)
1947 goto bail;
1948
1949 if (!kvmppc_xive_check_save_restore(vcpu)) {
1950 pr_err("inconsistent save-restore setup for VCPU %d\n", cpu);
1951 r = -EIO;
1952 goto bail;
1953 }
1954
1955
1956 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
1957 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1958
1959
1960 xc->vp_ipi = xive_native_alloc_irq();
1961 if (!xc->vp_ipi) {
1962 pr_err("Failed to allocate xive irq for VCPU IPI\n");
1963 r = -EIO;
1964 goto bail;
1965 }
1966 pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1967
1968 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1969 if (r)
1970 goto bail;
1971
1972
1973
1974
1975
1976 r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive));
1977 if (r) {
1978 pr_err("Failed to enable VP in OPAL, err %d\n", r);
1979 goto bail;
1980 }
1981
1982
1983
1984
1985
1986
1987
1988
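 /*
  * Configure the queues: provision those priorities the guest
  * already uses (per xive->qmap), configure the remaining ones
  * with no queue page, and stop before priority 7 in single
  * escalation mode, where it is not available.
  */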
1989 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1990 struct xive_q *q = &xc->queues[i];
1991
1992
1993 if (i == 7 && kvmppc_xive_has_single_escalation(xive))
1994 break;
1995
1996
1997 if (xive->qmap & (1 << i)) {
1998 r = xive_provision_queue(vcpu, i);
1999 if (r == 0 && !kvmppc_xive_has_single_escalation(xive))
2000 kvmppc_xive_attach_escalation(
2001 vcpu, i, kvmppc_xive_has_single_escalation(xive));
2002 if (r)
2003 goto bail;
2004 } else {
2005 r = xive_native_configure_queue(xc->vp_id,
2006 q, i, NULL, 0, true);
2007 if (r) {
2008 pr_err("Failed to configure queue %d for VCPU %d\n",
2009 i, cpu);
2010 goto bail;
2011 }
2012 }
2013 }
2014
2015
2016 r = kvmppc_xive_attach_escalation(vcpu, 0, kvmppc_xive_has_single_escalation(xive));
2017 if (r)
2018 goto bail;
2019
2020
2021 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
2022 if (!r)
2023 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
2024
2025bail:
2026 mutex_unlock(&xive->lock);
2027 if (r) {
2028 kvmppc_xive_cleanup_vcpu(vcpu);
2029 return r;
2030 }
2031
2032 vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
2033 return 0;
2034}
2035
2036
2037
2038
2039static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
2040{
2041 struct kvmppc_xive_src_block *sb;
2042 struct kvmppc_xive_irq_state *state;
2043 u16 idx;
2044
2045 sb = kvmppc_xive_find_source(xive, irq, &idx);
2046 if (!sb)
2047 return;
2048
2049 state = &sb->irq_state[idx];
2050
2051
2052 if (!state->valid) {
2053 pr_err("invalid irq 0x%x in cpu queue!\n", irq);
2054 return;
2055 }
2056
2057
2058
2059
2060
2061
2062 if (!state->saved_p)
2063 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
2064
2065
2066 state->in_queue = true;
2067}
2068
2069static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
2070 struct kvmppc_xive_src_block *sb,
2071 u32 irq)
2072{
2073 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
2074
2075 if (!state->valid)
2076 return;
2077
2078
2079 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
2080
2081
2082 state->saved_p = state->old_p;
2083 state->saved_q = state->old_q;
2084
2085
2086 arch_spin_unlock(&sb->lock);
2087}
2088
2089static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
2090 struct kvmppc_xive_src_block *sb,
2091 u32 irq)
2092{
2093 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
2094
2095 if (!state->valid)
2096 return;
2097
2098
2099
2100
2101
2102
2103 xive_lock_for_unmask(sb, state);
2104
2105
2106 if (state->saved_scan_prio != MASKED)
2107 xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
2108
2109
2110 arch_spin_unlock(&sb->lock);
2111}
2112
2113static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
2114{
2115 u32 idx = q->idx;
2116 u32 toggle = q->toggle;
2117 u32 irq;
2118
2119 do {
2120 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
2121 if (irq > XICS_IPI)
2122 xive_pre_save_set_queued(xive, irq);
2123 } while(irq);
2124}
2125
2126static void xive_pre_save_scan(struct kvmppc_xive *xive)
2127{
2128 struct kvm_vcpu *vcpu = NULL;
2129 unsigned long i;
2130 int j;
2131
2132
2133
2134
2135
2136 for (i = 0; i <= xive->max_sbid; i++) {
2137 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2138 if (!sb)
2139 continue;
2140 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
2141 xive_pre_save_mask_irq(xive, sb, j);
2142 }
2143
2144
2145 kvm_for_each_vcpu(i, vcpu, xive->kvm) {
2146 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2147 if (!xc)
2148 continue;
2149 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
2150 if (xc->queues[j].qpage)
2151 xive_pre_save_queue(xive, &xc->queues[j]);
2152 }
2153 }
2154
2155
2156 for (i = 0; i <= xive->max_sbid; i++) {
2157 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2158 if (!sb)
2159 continue;
2160 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
2161 xive_pre_save_unmask_irq(xive, sb, j);
2162 }
2163}
2164
2165static void xive_post_save_scan(struct kvmppc_xive *xive)
2166{
2167 u32 i, j;
2168
2169
2170 for (i = 0; i <= xive->max_sbid; i++) {
2171 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2172 if (!sb)
2173 continue;
2174 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
2175 sb->irq_state[j].in_queue = false;
2176 }
2177
2178
2179 xive->saved_src_count = 0;
2180}
2181
2182
2183
2184
2185static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
2186{
2187 struct kvmppc_xive_src_block *sb;
2188 struct kvmppc_xive_irq_state *state;
2189 u64 __user *ubufp = (u64 __user *) addr;
2190 u64 val, prio;
2191 u16 idx;
2192
2193 sb = kvmppc_xive_find_source(xive, irq, &idx);
2194 if (!sb)
2195 return -ENOENT;
2196
2197 state = &sb->irq_state[idx];
2198
2199 if (!state->valid)
2200 return -ENOENT;
2201
2202 pr_devel("get_source(%ld)...\n", irq);
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
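 /*
  * The first source read of a save sequence triggers a scan that
  * masks every source, captures its P/Q state and walks the queues
  * to flag in-queue interrupts; saved_src_count tracks progress so
  * the scan state can be reset once every source has been read
  * (see xive_post_save_scan).
  */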
2220 if (xive->saved_src_count == 0)
2221 xive_pre_save_scan(xive);
2222 xive->saved_src_count++;
2223
2224
2225 val = state->act_server;
2226 prio = state->saved_scan_prio;
2227
2228 if (prio == MASKED) {
2229 val |= KVM_XICS_MASKED;
2230 prio = state->saved_priority;
2231 }
2232 val |= prio << KVM_XICS_PRIORITY_SHIFT;
2233 if (state->lsi) {
2234 val |= KVM_XICS_LEVEL_SENSITIVE;
2235 if (state->saved_p)
2236 val |= KVM_XICS_PENDING;
2237 } else {
2238 if (state->saved_p)
2239 val |= KVM_XICS_PRESENTED;
2240
2241 if (state->saved_q)
2242 val |= KVM_XICS_QUEUED;
2243
2244
2245
2246
2247
2248
2249
2250 if (state->in_queue || (prio == MASKED && state->saved_q))
2251 val |= KVM_XICS_PENDING;
2252 }
2253
2254
2255
2256
2257
2258 if (xive->saved_src_count == xive->src_count)
2259 xive_post_save_scan(xive);
2260
2261
2262 if (put_user(val, ubufp))
2263 return -EFAULT;
2264
2265 return 0;
2266}
2267
2268struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
2269 struct kvmppc_xive *xive, int irq)
2270{
2271 struct kvmppc_xive_src_block *sb;
2272 int i, bid;
2273
2274 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
2275
2276 mutex_lock(&xive->lock);
2277
2278
2279 if (xive->src_blocks[bid])
2280 goto out;
2281
2282
2283 sb = kzalloc(sizeof(*sb), GFP_KERNEL);
2284 if (!sb)
2285 goto out;
2286
2287 sb->id = bid;
2288
2289 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
2290 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
2291 sb->irq_state[i].eisn = 0;
2292 sb->irq_state[i].guest_priority = MASKED;
2293 sb->irq_state[i].saved_priority = MASKED;
2294 sb->irq_state[i].act_priority = MASKED;
2295 }
2296 smp_wmb();
2297 xive->src_blocks[bid] = sb;
2298
2299 if (bid > xive->max_sbid)
2300 xive->max_sbid = bid;
2301
2302out:
2303 mutex_unlock(&xive->lock);
2304 return xive->src_blocks[bid];
2305}
2306
2307static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
2308{
2309 struct kvm *kvm = xive->kvm;
2310 struct kvm_vcpu *vcpu = NULL;
2311 unsigned long i;
2312
2313 kvm_for_each_vcpu(i, vcpu, kvm) {
2314 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2315
2316 if (!xc)
2317 continue;
2318
2319 if (xc->delayed_irq == irq) {
2320 xc->delayed_irq = 0;
2321 xive->delayed_irqs--;
2322 return true;
2323 }
2324 }
2325 return false;
2326}
2327
2328static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
2329{
2330 struct kvmppc_xive_src_block *sb;
2331 struct kvmppc_xive_irq_state *state;
2332 u64 __user *ubufp = (u64 __user *) addr;
2333 u16 idx;
2334 u64 val;
2335 u8 act_prio, guest_prio;
2336 u32 server;
2337 int rc = 0;
2338
2339 if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
2340 return -ENOENT;
2341
2342 pr_devel("set_source(irq=0x%lx)\n", irq);
2343
2344
2345 sb = kvmppc_xive_find_source(xive, irq, &idx);
2346 if (!sb) {
2347 pr_devel("No source, creating source block...\n");
2348 sb = kvmppc_xive_create_src_block(xive, irq);
2349 if (!sb) {
2350 pr_devel("Failed to create block...\n");
2351 return -ENOMEM;
2352 }
2353 }
2354 state = &sb->irq_state[idx];
2355
2356
2357 if (get_user(val, ubufp)) {
2358 pr_devel("fault getting user info !\n");
2359 return -EFAULT;
2360 }
2361
2362 server = val & KVM_XICS_DESTINATION_MASK;
2363 guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
2364
 pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
 val, server, guest_prio);
2367
2368
2369
2370
2371
2372 if (!state->ipi_number) {
2373 state->ipi_number = xive_native_alloc_irq();
2374 if (state->ipi_number == 0) {
2375 pr_devel("Failed to allocate IPI !\n");
2376 return -ENOMEM;
2377 }
2378 xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
2379 pr_devel(" src_ipi=0x%x\n", state->ipi_number);
2380 }
2381
2382
2383
2384
2385
2386
2387
2388
2389 state->guest_priority = 0;
2390 xive_lock_and_mask(xive, sb, state);
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400 act_prio = xive_prio_from_guest(guest_prio);
2401 state->act_priority = MASKED;
2402
2403
2404
2405
2406
2407
2408 arch_spin_unlock(&sb->lock);
2409
2410
2411 if (act_prio != MASKED) {
2412
2413 mutex_lock(&xive->lock);
2414 rc = xive_check_provisioning(xive->kvm, act_prio);
2415 mutex_unlock(&xive->lock);
2416
2417
2418 if (rc == 0)
2419 rc = xive_target_interrupt(xive->kvm, state,
2420 server, act_prio);
2421
2422
2423
2424
2425
2426 }
2427
2428
2429
2430
2431
2432 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
2433 val |= KVM_XICS_PENDING;
2434 pr_devel(" Found delayed ! forcing PENDING !\n");
2435 }
2436
2437
2438 state->old_p = false;
2439 state->old_q = false;
2440 state->lsi = false;
2441 state->asserted = false;
2442
2443
2444 if (val & KVM_XICS_LEVEL_SENSITIVE) {
2445 state->lsi = true;
2446 if (val & KVM_XICS_PENDING)
2447 state->asserted = true;
2448 pr_devel(" LSI ! Asserted=%d\n", state->asserted);
2449 }
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461 if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
2462 state->old_p = true;
2463 if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
2464 state->old_q = true;
2465
2466 pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
2467
2468
2469
2470
2471
2472
2473 if (val & KVM_XICS_MASKED) {
2474 pr_devel(" masked, saving prio\n");
2475 state->guest_priority = MASKED;
2476 state->saved_priority = guest_prio;
2477 } else {
2478 pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
2479 xive_finish_unmask(xive, sb, state, guest_prio);
2480 state->saved_priority = guest_prio;
2481 }
2482
2483
2484 if (!state->valid)
2485 xive->src_count++;
2486 state->valid = true;
2487
2488 return 0;
2489}
2490
2491int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
2492 bool line_status)
2493{
2494 struct kvmppc_xive *xive = kvm->arch.xive;
2495 struct kvmppc_xive_src_block *sb;
2496 struct kvmppc_xive_irq_state *state;
2497 u16 idx;
2498
2499 if (!xive)
2500 return -ENODEV;
2501
2502 sb = kvmppc_xive_find_source(xive, irq, &idx);
2503 if (!sb)
2504 return -EINVAL;
2505
2506
2507 state = &sb->irq_state[idx];
2508 if (!state->valid)
2509 return -EINVAL;
2510
2511
2512 if (state->pt_number)
2513 return -EINVAL;
2514
2515 if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
2516 state->asserted = true;
2517 else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
2518 state->asserted = false;
2519 return 0;
2520 }
2521
2522
2523 xive_irq_trigger(&state->ipi_data);
2524
2525 return 0;
2526}
2527
2528int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
2529{
2530 u32 __user *ubufp = (u32 __user *) addr;
2531 u32 nr_servers;
2532 int rc = 0;
2533
2534 if (get_user(nr_servers, ubufp))
2535 return -EFAULT;
2536
2537 pr_devel("%s nr_servers=%u\n", __func__, nr_servers);
2538
2539 if (!nr_servers || nr_servers > KVM_MAX_VCPU_IDS)
2540 return -EINVAL;
2541
2542 mutex_lock(&xive->lock);
2543 if (xive->vp_base != XIVE_INVALID_VP)
2544
2545
2546
2547
2548
2549
2550
2551 rc = -EBUSY;
2552 else if (nr_servers > KVM_MAX_VCPUS)
2553
2554
2555
2556 xive->nr_servers = KVM_MAX_VCPUS;
2557 else
2558 xive->nr_servers = nr_servers;
2559
2560 mutex_unlock(&xive->lock);
2561
2562 return rc;
2563}
2564
2565static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2566{
2567 struct kvmppc_xive *xive = dev->private;
2568
2569
2570 switch (attr->group) {
2571 case KVM_DEV_XICS_GRP_SOURCES:
2572 return xive_set_source(xive, attr->attr, attr->addr);
2573 case KVM_DEV_XICS_GRP_CTRL:
2574 switch (attr->attr) {
2575 case KVM_DEV_XICS_NR_SERVERS:
2576 return kvmppc_xive_set_nr_servers(xive, attr->addr);
2577 }
2578 }
2579 return -ENXIO;
2580}
2581
2582static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2583{
2584 struct kvmppc_xive *xive = dev->private;
2585
2586
2587 switch (attr->group) {
2588 case KVM_DEV_XICS_GRP_SOURCES:
2589 return xive_get_source(xive, attr->attr, attr->addr);
2590 }
2591 return -ENXIO;
2592}
2593
2594static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2595{
2596
2597 switch (attr->group) {
2598 case KVM_DEV_XICS_GRP_SOURCES:
2599 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
2600 attr->attr < KVMPPC_XICS_NR_IRQS)
2601 return 0;
2602 break;
2603 case KVM_DEV_XICS_GRP_CTRL:
2604 switch (attr->attr) {
2605 case KVM_DEV_XICS_NR_SERVERS:
2606 return 0;
2607 }
2608 }
2609 return -ENXIO;
2610}
2611
2612static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
2613{
2614 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
2615 xive_native_configure_irq(hw_num, 0, MASKED, 0);
2616}
2617
2618void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
2619{
2620 int i;
2621
2622 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
2623 struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
2624
2625 if (!state->valid)
2626 continue;
2627
2628 kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
2629 xive_cleanup_irq_data(&state->ipi_data);
2630 xive_native_free_irq(state->ipi_number);
2631
2632
2633 if (state->pt_number)
2634 kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
2635
2636 state->valid = false;
2637 }
2638}
2639
2640
2641
2642
2643static void kvmppc_xive_release(struct kvm_device *dev)
2644{
2645 struct kvmppc_xive *xive = dev->private;
2646 struct kvm *kvm = xive->kvm;
2647 struct kvm_vcpu *vcpu;
2648 unsigned long i;
2649
2650 pr_devel("Releasing xive device\n");
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661 debugfs_remove(xive->dentry);
2662
2663
2664
2665
2666 kvm_for_each_vcpu(i, vcpu, kvm) {
2667
2668
2669
2670
2671
2672
2673
2674
2675 mutex_lock(&vcpu->mutex);
2676 kvmppc_xive_cleanup_vcpu(vcpu);
2677 mutex_unlock(&vcpu->mutex);
2678 }
2679
2680
2681
2682
2683
2684
2685
2686 kvm->arch.xive = NULL;
2687
2688
2689 for (i = 0; i <= xive->max_sbid; i++) {
2690 if (xive->src_blocks[i])
2691 kvmppc_xive_free_sources(xive->src_blocks[i]);
2692 kfree(xive->src_blocks[i]);
2693 xive->src_blocks[i] = NULL;
2694 }
2695
2696 if (xive->vp_base != XIVE_INVALID_VP)
2697 xive_native_free_vp_block(xive->vp_base);
2698
2699
2700
2701
2702
2703
2704
2705
2706 kfree(dev);
2707}
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
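/*
 * The guest can switch interrupt mode (XICS legacy vs. XIVE native),
 * releasing one KVM device and creating another. As a transitional
 * safety net the kvmppc_xive structure is recycled rather than freed,
 * so reuse (and reset) a previously allocated instance of the
 * requested type when there is one.
 */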
2718struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
2719{
2720 struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
2721 &kvm->arch.xive_devices.native :
2722 &kvm->arch.xive_devices.xics_on_xive;
2723 struct kvmppc_xive *xive = *kvm_xive_device;
2724
2725 if (!xive) {
2726 xive = kzalloc(sizeof(*xive), GFP_KERNEL);
2727 *kvm_xive_device = xive;
2728 } else {
2729 memset(xive, 0, sizeof(*xive));
2730 }
2731
2732 return xive;
2733}
2734
2735
2736
2737
2738static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
2739{
2740 struct kvmppc_xive *xive;
2741 struct kvm *kvm = dev->kvm;
2742
2743 pr_devel("Creating xive for partition\n");
2744
2745
2746 if (kvm->arch.xive)
2747 return -EEXIST;
2748
2749 xive = kvmppc_xive_get_device(kvm, type);
2750 if (!xive)
2751 return -ENOMEM;
2752
2753 dev->private = xive;
2754 xive->dev = dev;
2755 xive->kvm = kvm;
2756 mutex_init(&xive->lock);
2757
2758
2759 xive->q_order = xive_native_default_eq_shift();
2760 if (xive->q_order < PAGE_SHIFT)
2761 xive->q_page_order = 0;
2762 else
2763 xive->q_page_order = xive->q_order - PAGE_SHIFT;
2764
2765
2766 xive->vp_base = XIVE_INVALID_VP;
2767
2768
2769
2770 xive->nr_servers = KVM_MAX_VCPUS;
2771
2772 if (xive_native_has_single_escalation())
2773 xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;
2774
2775 if (xive_native_has_save_restore())
2776 xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE;
2777
2778 kvm->arch.xive = xive;
2779 return 0;
2780}
2781
2782int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
2783{
2784 struct kvmppc_vcore *vc = vcpu->arch.vcore;
2785
2786
2787 if (!kvmppc_xics_enabled(vcpu))
2788 return H_TOO_HARD;
2789
2790 switch (req) {
2791 case H_XIRR:
2792 return xive_vm_h_xirr(vcpu);
2793 case H_CPPR:
2794 return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
2795 case H_EOI:
2796 return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
2797 case H_IPI:
2798 return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
2799 kvmppc_get_gpr(vcpu, 5));
2800 case H_IPOLL:
2801 return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
2802 case H_XIRR_X:
2803 xive_vm_h_xirr(vcpu);
2804 kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
2805 return H_SUCCESS;
2806 }
2807
2808 return H_UNSUPPORTED;
2809}
2810EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);
2811
2812int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
2813{
2814 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2815 unsigned int i;
2816
2817 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
2818 struct xive_q *q = &xc->queues[i];
2819 u32 i0, i1, idx;
2820
2821 if (!q->qpage && !xc->esc_virq[i])
2822 continue;
2823
2824 if (q->qpage) {
2825 seq_printf(m, " q[%d]: ", i);
2826 idx = q->idx;
2827 i0 = be32_to_cpup(q->qpage + idx);
2828 idx = (idx + 1) & q->msk;
2829 i1 = be32_to_cpup(q->qpage + idx);
2830 seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
2831 i0, i1);
2832 }
2833 if (xc->esc_virq[i]) {
2834 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
2835 struct xive_irq_data *xd =
2836 irq_data_get_irq_handler_data(d);
2837 u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2838
2839 seq_printf(m, " ESC %d %c%c EOI @%llx",
2840 xc->esc_virq[i],
2841 (pq & XIVE_ESB_VAL_P) ? 'P' : '-',
2842 (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-',
2843 xd->eoi_page);
2844 seq_puts(m, "\n");
2845 }
2846 }
2847 return 0;
2848}
2849
2850void kvmppc_xive_debug_show_sources(struct seq_file *m,
2851 struct kvmppc_xive_src_block *sb)
2852{
2853 int i;
2854
2855 seq_puts(m, " LISN HW/CHIP TYPE PQ EISN CPU/PRIO\n");
2856 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
2857 struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
2858 struct xive_irq_data *xd;
2859 u64 pq;
2860 u32 hw_num;
2861
2862 if (!state->valid)
2863 continue;
2864
2865 kvmppc_xive_select_irq(state, &hw_num, &xd);
2866
2867 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2868
2869 seq_printf(m, "%08x %08x/%02x", state->number, hw_num,
2870 xd->src_chip);
2871 if (state->lsi)
2872 seq_printf(m, " %cLSI", state->asserted ? '^' : ' ');
2873 else
2874 seq_puts(m, " MSI");
2875
2876 seq_printf(m, " %s %c%c %08x % 4d/%d",
2877 state->ipi_number == hw_num ? "IPI" : " PT",
2878 pq & XIVE_ESB_VAL_P ? 'P' : '-',
2879 pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
2880 state->eisn, state->act_server,
2881 state->act_priority);
2882
2883 seq_puts(m, "\n");
2884 }
2885}
2886
2887static int xive_debug_show(struct seq_file *m, void *private)
2888{
2889 struct kvmppc_xive *xive = m->private;
2890 struct kvm *kvm = xive->kvm;
2891 struct kvm_vcpu *vcpu;
2892 u64 t_rm_h_xirr = 0;
2893 u64 t_rm_h_ipoll = 0;
2894 u64 t_rm_h_cppr = 0;
2895 u64 t_rm_h_eoi = 0;
2896 u64 t_rm_h_ipi = 0;
2897 u64 t_vm_h_xirr = 0;
2898 u64 t_vm_h_ipoll = 0;
2899 u64 t_vm_h_cppr = 0;
2900 u64 t_vm_h_eoi = 0;
2901 u64 t_vm_h_ipi = 0;
2902 unsigned long i;
2903
2904 if (!kvm)
2905 return 0;
2906
2907 seq_puts(m, "=========\nVCPU state\n=========\n");
2908
2909 kvm_for_each_vcpu(i, vcpu, kvm) {
2910 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2911
2912 if (!xc)
2913 continue;
2914
2915 seq_printf(m, "VCPU %d: VP:%#x/%02x\n"
2916 " CPPR:%#x HWCPPR:%#x MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
2917 xc->server_num, xc->vp_id, xc->vp_chip_id,
2918 xc->cppr, xc->hw_cppr,
2919 xc->mfrr, xc->pending,
2920 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
2921
2922 kvmppc_xive_debug_show_queues(m, vcpu);
2923
2924 t_rm_h_xirr += xc->stat_rm_h_xirr;
2925 t_rm_h_ipoll += xc->stat_rm_h_ipoll;
2926 t_rm_h_cppr += xc->stat_rm_h_cppr;
2927 t_rm_h_eoi += xc->stat_rm_h_eoi;
2928 t_rm_h_ipi += xc->stat_rm_h_ipi;
2929 t_vm_h_xirr += xc->stat_vm_h_xirr;
2930 t_vm_h_ipoll += xc->stat_vm_h_ipoll;
2931 t_vm_h_cppr += xc->stat_vm_h_cppr;
2932 t_vm_h_eoi += xc->stat_vm_h_eoi;
2933 t_vm_h_ipi += xc->stat_vm_h_ipi;
2934 }
2935
2936 seq_puts(m, "Hcalls totals\n");
2937 seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
2938 seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
2939 seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
2940 seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
2941 seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
2942
2943 seq_puts(m, "=========\nSources\n=========\n");
2944
2945 for (i = 0; i <= xive->max_sbid; i++) {
2946 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2947
2948 if (sb) {
2949 arch_spin_lock(&sb->lock);
2950 kvmppc_xive_debug_show_sources(m, sb);
2951 arch_spin_unlock(&sb->lock);
2952 }
2953 }
2954
2955 return 0;
2956}
2957
2958DEFINE_SHOW_ATTRIBUTE(xive_debug);
2959
2960static void xive_debugfs_init(struct kvmppc_xive *xive)
2961{
2962 xive->dentry = debugfs_create_file("xive", S_IRUGO, xive->kvm->debugfs_dentry,
2963 xive, &xive_debug_fops);
2964
2965 pr_debug("%s: created\n", __func__);
2966}
2967
2968static void kvmppc_xive_init(struct kvm_device *dev)
2969{
2970 struct kvmppc_xive *xive = dev->private;
2971
2972
2973 xive_debugfs_init(xive);
2974}
2975
2976struct kvm_device_ops kvm_xive_ops = {
2977 .name = "kvm-xive",
2978 .create = kvmppc_xive_create,
2979 .init = kvmppc_xive_init,
2980 .release = kvmppc_xive_release,
2981 .set_attr = xive_set_attr,
2982 .get_attr = xive_get_attr,
2983 .has_attr = xive_has_attr,
2984};
2985