// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <linux/irqdomain.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"
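
/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed" in the
 * physical CPU.
 */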
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"
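
/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */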
#define XIVE_Q_GAP	2
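
/*
 * Push a vcpu's context to the XIVE on guest entry.
 * This assumes we are in virtual mode (MMU on)
 */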
void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
{
	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
	u64 pq;

	/*
	 * Nothing to do if the platform doesn't have a XIVE
	 * or this vCPU doesn't have its own XIVE context
	 * (e.g. because it's not using an in-kernel interrupt controller).
	 */
	if (!tima || !vcpu->arch.xive_cam_word)
		return;

	eieio();
	__raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
	__raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
	vcpu->arch.xive_pushed = 1;
	eieio();

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	vcpu->arch.irq_pending = 0;

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	if (vcpu->arch.xive_esc_on) {
		pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
						  XIVE_ESB_SET_PQ_01));
		mb();

		/*
		 * We have a possible subtle race here: The escalation
		 * interrupt might have fired and be on its way to the
		 * host queue while we mask it, and if we unmask it
		 * early enough (re-cede right away), there is a
		 * theoretical possibility that it fires again, thus
		 * landing in the target queue more than once which is
		 * a big no-no.
		 *
		 * Fortunately, solving this is rather easy. If the
		 * above load setting PQ to 01 returns a previous
		 * value where P is set, then we know the escalation
		 * interrupt is somewhere on its way to the host. In
		 * that case we simply don't clear the xive_esc_on
		 * flag below. It will be eventually cleared by the
		 * handler for the escalation interrupt.
		 *
		 * Then, when doing a cede, we check that flag again
		 * before re-enabling the escalation interrupt, and if
		 * set, we abort the cede.
		 */
		if (!(pq & XIVE_ESB_VAL_P))
			/* Now P is 0, we can clear the flag */
			vcpu->arch.xive_esc_on = 0;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
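
/*
 * Pull a vcpu's context from the XIVE on guest exit.
 * This assumes we are in virtual mode (MMU on)
 */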
void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
{
	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;

	if (!vcpu->arch.xive_pushed)
		return;

	/*
	 * Should not have been pushed if there is no tima
	 */
	if (WARN_ON(!tima))
		return;

	eieio();
	/* First load to pull the context, we ignore the value */
	__raw_readl(tima + TM_SPC_PULL_OS_CTX);
	/* Second load to recover the context state (Words 0 and 1) */
	vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);

	/* Fixup some of the state for the next load */
	vcpu->arch.xive_saved_state.lsmfb = 0;
	vcpu->arch.xive_saved_state.ack = 0xff;
	vcpu->arch.xive_pushed = 0;
	eieio();
}
EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);

void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
{
	void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;

	if (!esc_vaddr)
		return;

	/* we are using XIVE with single escalation */

	if (vcpu->arch.xive_esc_on) {
		/*
		 * If we still have a pending escalation, abort the cede,
		 * and we must set PQ to 10 rather than 00 so that we don't
		 * potentially end up with two entries for the escalation
		 * interrupt in the XIVE interrupt queue.  In that case
		 * we also don't want to set xive_esc_on to 1 here in
		 * case we race with xive_esc_irq().
		 */
		vcpu->arch.ceded = 0;
		/*
		 * The escalation interrupts are special as we don't EOI them.
		 * There is no need to use the load-after-store ordering offset
		 * to set PQ to 10 as we won't use StoreEOI.
		 */
		__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
	} else {
		vcpu->arch.xive_esc_on = true;
		mb();
		__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
	}
	mb();
}
EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
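
/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page
 */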
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}

static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	/* Since we have the no-EOI flag, the interrupt is effectively
	 * disabled now. Clearing xive_esc_on means we won't bother
	 * doing so on the next entry.
	 *
	 * This also allows the entry code to know that if a PQ combination
	 * of 10 is observed while xive_esc_on is true, it means the queue
	 * contains an unprocessed escalation interrupt. We don't make use
	 * of that knowledge today but might (see comment in
	 * book3s_hv_rmhandlers.S)
	 */
	vcpu->arch.xive_esc_on = false;

	/* This orders xive_esc_on = false vs. subsequent stale_p = true */
	smp_wmb();	/* goes with smp_mb() in cleanup_single_escalation */

	return IRQ_HANDLED;
}

int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
				  bool single_escalation)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	if (single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;

	/* In single escalation mode, we grab the ESB MMIO of the
	 * source and mask it. Also populate the VCPU v/raddr
	 * of the ESB page for use by asm entry/exit code. Finally
	 * set the XIVE_IRQ_FLAG_NO_EOI flag which will prevent the
	 * core code from performing an EOI on the escalation
	 * interrupt, thus leaving it effectively masked after
	 * it fires once.
	 */
	if (single_escalation) {
		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
		vcpu->arch.xive_esc_raddr = xd->eoi_page;
		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
		xd->flags |= XIVE_IRQ_FLAG_NO_EOI;
	}

	return 0;
error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}

static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	void *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve infos on current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * a 0 MFRR.
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}

/* Called with xive->lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&xive->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations if needed */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !xive->single_escalation)
			kvmppc_xive_attach_escalation(vcpu, prio,
						      xive->single_escalation);
		if (rc)
			return rc;
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
	return 0;
}

static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}

static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue. */
	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}

int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	/* Try pick it */
	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel(" found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel(" no available target !\n");

	/* No available target ! */
	return -EBUSY;
}

static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/* Set PQ to 10, return old P and old Q and remember them */
	val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
	state->old_p = !!(val & 2);
	state->old_q = !!(val & 1);

	/*
	 * Synchronize hardware to ensure the queues are updated when
	 * masking
	 */
	xive_native_sync_source(hw_num);

	return old_prio;
}

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	/*
	 * Take the lock, try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto bail;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI,
	 * on the source. This will handle the cases where
	 * FW EOI is needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
}
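
/*
 * Target an interrupt to a given server/prio, this will fallback
 * to another server if necessary and perform the HW targetting
 * updates as needed
 *
 * NOTE: Must be called with the state lock held
 */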
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = kvmppc_xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);

	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 kvmppc_xive_vp(xive, server),
					 prio, state->number);
}
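
/*
 * Targetting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 *  - Unless it was never enabled (or we run out of capacity)
 *    an interrupt is always targetted at a valid server/queue
 *    pair even when "masked" by the guest. This pair tends to
 *    be the last one used but it can be changed under some
 *    circumstances. That allows us to separate targetting
 *    from masking, we only handle accounting during (re)targetting,
 *    this allows us to let an interrupt be pending in a queue
 *    while masked without the need to track the state of that
 *    queue.
 *
 *  - When masking, we set PQ to 10 and save the previous value
 *    of P and Q.
 *
 *  - When unmasking, if saved Q was set, we set PQ to 11
 *    otherwise we leave PQ to the HW state which will be either
 *    10 if nothing happened or 11 if the interrupt fired while
 *    masked. Effectively we are OR'ing the previous Q into the
 *    HW Q.
 *
 *    Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *    which will unmask the interrupt and shoot a new one if Q was
 *    set.
 *
 *    Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *    effectively meaning an H_EOI from the guest is still expected
 *    for that interrupt).
 *
 *  - If H_EOI occurs while masked, we clear the saved P.
 *
 *  - When changing target, we account on the new target and
 *    increment a separate "pending" counter on the old one.
 *    This pending counter will be used to decrement the old
 *    target's count when its queue has been observed empty.
 */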
int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED) {
		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm,
			      xive_prio_from_guest(priority));
		mutex_unlock(&xive->lock);
	}
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs, we'll handle
	 * targetting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask will not actually unmask, this will
	 * be done later by xive_finish_unmask() once the targetting
	 * has been done, so we don't try to unmask an interrupt
	 * that hasn't yet been targetted.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);

	/*
	 * Then we determine what new priority should be used
	 * for targetting purposes.
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything,
	 *
	 * The condition for re-targetting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 *       also ff, we don't do anything and leave the interrupt
	 *       untargetted. An attempt of doing an int_on on an
	 *       untargetted interrupt will fail. If that is a problem
	 *       we could initialize interrupts with valid default
	 */
	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally Update saved_priority to match. Only int_on/off
	 * set this field to a different value.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if interrupt was not targetted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargetted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	/*
	 * Lock and mask
	 */
	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}

static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return false;
	state = &sb->irq_state[idx];
	if (!state->valid)
		return false;

	/*
	 * Trigger the IPI. This assumes we never restore a pass-through
	 * interrupt which should be safe enough
	 */
	xive_irq_trigger(&state->ipi_data);

	return true;
}

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	if (!xc)
		return 0;

	/* Return the per-cpu state for state saving/migration */
	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}

int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 cppr, mfrr;
	u32 xisr;

	if (!xc || !xive)
		return -ENOENT;

	/* Grab individual state fields. We don't use pending_pri */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	/*
	 * We can't update the state of a "pushed" VCPU, but that
	 * shouldn't happen because the vcpu->mutex makes running a
	 * vcpu mutually exclusive with doing one_reg get/set on it.
	 */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EIO;

	/* Update VCPU HW saved state */
	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	/*
	 * Update MFRR state. If it's not 0xff, we mark the VCPU as
	 * having a pending MFRR change, which will re-evaluate the
	 * target. The VCPU will thus potentially get a spurious
	 * interrupt but that's not a big deal.
	 */
	xc->mfrr = mfrr;
	if (mfrr < cppr)
		xive_irq_trigger(&xc->vp_ipi_data);

	/*
	 * Now saved XIRR is "interesting". It means there's something in
	 * the legacy "1 element" queue... for an IPI we simply ignore it,
	 * as the MFRR restore will handle that. For anything else we need
	 * to force a resend of the source.
	 * However the source may not have been setup yet. If that's the
	 * case, we keep that info and increment a counter in the xive to
	 * tell subsequent xive_set_source() to go look.
	 */
	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel(" xisr restore delayed\n");
	}

	return 0;
}

int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mark the passed-through interrupt as going to a VCPU,
	 * this will prevent further EOIs and similar operations
	 * from the XIVE code. It will also mask the interrupt
	 * to either PQ=10 or 11 state, the latter if the interrupt
	 * is pending. This will allow us to unmask or retrigger it
	 * after routing it to the guest with a simple EOI.
	 *
	 * The "state" argument is a "token", all it needs is to be
	 * non-NULL to switch to passed-through or NULL for the
	 * other way around. We may not yet have an actual VCPU
	 * target here and we shouldn't really care as the
	 * code will handle everything and complain if needed.
	 */
	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/*
	 * Mask and read state of IPI. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/* Turn the IPI hard off */
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	/*
	 * Reset ESB guest mapping. Needed when ESB pages are exposed
	 * to the guest in XIVE native mode
	 */
	if (xive->ops && xive->ops->reset_mapped)
		xive->ops->reset_mapped(kvm, guest_irq);

	/* Grab info about irq */
	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	/*
	 * Configure the IRQ to match the existing configuration of
	 * the IPI if it was already targetted. Otherwise this will
	 * mask the interrupt in a lossy way (act_priority is 0xff)
	 * which is fine for a never started interrupt.
	 */
	xive_native_configure_irq(hw_irq,
				  kvmppc_xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * We do an EOI to enable the interrupt (and retrigger if needed)
	 * if the guest has the interrupt unmasked and the P bit was *not*
	 * set in the IPI. If it was set, we know a slot may still be in
	 * use in the target queue thus we have to wait for a guest
	 * originated EOI
	 */
	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	/* Clear old_p/old_q as they are no longer relevant */
	state->old_p = state->old_q = false;

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mask and read state of IRQ. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/*
	 * If old_p is set, the interrupt is pending, we switch it to
	 * PQ=11. This will force a resend in the host so the interrupt
	 * isn't lost to whatever host driver may pick it up
	 */
	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	/* Release the passed-through interrupt to the host */
	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/* Forget about the IRQ */
	state->pt_number = 0;
	state->pt_data = NULL;

	/*
	 * Reset ESB guest mapping. Needed when ESB pages are exposed
	 * to the guest in XIVE native mode
	 */
	if (xive->ops && xive->ops->reset_mapped) {
		xive->ops->reset_mapped(kvm, guest_irq);
	}

	/* Reconfigure the IPI */
	xive_native_configure_irq(state->ipi_number,
				  kvmppc_xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * If old_p is set (we have a queue entry potentially
	 * occupied) or the interrupt is masked, we set the IPI
	 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
	 */
	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (!state->valid)
				continue;
			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			/* Clean it up */
			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
		}
	}

	/* Disable vcpu's escalation interrupt */
	if (vcpu->arch.xive_esc_on) {
		__raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
					     XIVE_ESB_SET_PQ_01));
		vcpu->arch.xive_esc_on = false;
	}

	/*
	 * Clear pointers to escalation interrupt ESB.
	 * This is safe because the vcpu->mutex is held, preventing
	 * any other CPU from concurrently executing a KVM_RUN ioctl.
	 */
	vcpu->arch.xive_esc_vaddr = 0;
	vcpu->arch.xive_esc_raddr = 0;
}
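
/*
 * In single escalation mode, the escalation interrupt is marked so
 * that EOI doesn't re-enable it, but just sets the stale_p flag to
 * indicate that the P bit has already been dealt with.  However, the
 * assembly code that enters the guest sets PQ to 00 without clearing
 * stale_p (because it has no easy way to address it).  Hence we have
 * to adjust stale_p before shutting down the interrupt.
 */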
void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
				    struct kvmppc_xive_vcpu *xc, int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * This slightly odd sequence gives the right result
	 * (i.e. stale_p set if xive_esc_on is false) even if
	 * we race with xive_esc_irq() and xive_irq_eoi().
	 */
	xd->stale_p = false;
	smp_mb();		/* paired with smp_wmb in xive_esc_irq */
	if (!vcpu->arch.xive_esc_on)
		xd->stale_p = true;
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	int i;

	if (!kvmppc_xics_enabled(vcpu))
		return;

	if (!xc)
		return;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Mask the VP IPI */
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	/* Free escalations */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		if (xc->esc_virq[i]) {
			if (xc->xive->single_escalation)
				xive_cleanup_single_escalation(vcpu, xc,
							xc->esc_virq[i]);
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}
	}

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Clear the cam word so guest entry won't try to push context */
	vcpu->arch.xive_cam_word = 0;

	/* Free the queues */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		xive_native_disable_queue(xc->vp_id, q, i);
		if (q->qpage) {
			free_pages((unsigned long)q->qpage,
				   xive->q_page_order);
			q->qpage = NULL;
		}
	}

	/* Free the IPI */
	if (xc->vp_ipi) {
		xive_cleanup_irq_data(&xc->vp_ipi_data);
		xive_native_free_irq(xc->vp_ipi);
	}

	/* Free the VP */
	kfree(xc);

	/* Cleanup the vcpu */
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
	vcpu->arch.xive_vcpu = NULL;
}

static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
{
	/* We have a block of xive->nr_servers VPs. We just need to check
	 * packed vCPU ids are below that.
	 */
	return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
}

int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
{
	u32 vp_id;

	if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}

	if (xive->vp_base == XIVE_INVALID_VP) {
		xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
		pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);

		if (xive->vp_base == XIVE_INVALID_VP)
			return -ENOSPC;
	}

	vp_id = kvmppc_xive_vp(xive, cpu);
	if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}

	*vp = vp_id;

	return 0;
}

int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			     struct kvm_vcpu *vcpu, u32 cpu)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc;
	int i, r = -EBUSY;
	u32 vp_id;

	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

	if (dev->ops != &kvm_xive_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;

	/* We need to synchronize with queue provisioning */
	mutex_lock(&xive->lock);

	r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
	if (r)
		goto bail;

	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc) {
		r = -ENOMEM;
		goto bail;
	}

	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = cpu;
	xc->vp_id = vp_id;
	xc->mfrr = 0xff;
	xc->valid = true;

	r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (r)
		goto bail;

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* Allocate IPI */
	xc->vp_ipi = xive_native_alloc_irq();
	if (!xc->vp_ipi) {
		pr_err("Failed to allocate xive irq for VCPU IPI\n");
		r = -EIO;
		goto bail;
	}
	pr_devel(" IPI=0x%x\n", xc->vp_ipi);

	r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
	if (r)
		goto bail;

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect escalation interrupts numbering
	 */
	r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
	if (r) {
		pr_err("Failed to enable VP in OPAL, err %d\n", r);
		goto bail;
	}

	/*
	 * Initialize queues. Initially we set them all for no queueing
	 * and we enable escalation for queue 0 only which we'll use for
	 * our mfrr change notifications. If the VCPU is hot-plugged, we
	 * do handle provisioning however based on the existing "map"
	 * of enabled queues.
	 */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Single escalation, no queue 7 */
		if (i == 7 && xive->single_escalation)
			break;

		/* Is queue already enabled ? Provision it */
		if (xive->qmap & (1 << i)) {
			r = xive_provision_queue(vcpu, i);
			if (r == 0 && !xive->single_escalation)
				kvmppc_xive_attach_escalation(
					vcpu, i, xive->single_escalation);
			if (r)
				goto bail;
		} else {
			r = xive_native_configure_queue(xc->vp_id,
							q, i, NULL, 0, true);
			if (r) {
				pr_err("Failed to configure queue %d for VCPU %d\n",
				       i, cpu);
				goto bail;
			}
		}
	}

	/* If not done above, attach priority 0 escalation */
	r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
	if (r)
		goto bail;

	/* Route the IPI */
	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
	if (!r)
		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
	mutex_unlock(&xive->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
	}

	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
	return 0;
}
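
/*
 * Scanning of queues before/after migration save
 */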
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return;

	state = &sb->irq_state[idx];

	/* Some sanity checking */
	if (!state->valid) {
		pr_err("invalid irq 0x%x in cpu queue!\n", irq);
		return;
	}

	/*
	 * If the interrupt is in a queue it should have P set.
	 * We warn so that gets reported. A backtrace isn't useful
	 * so no need to use a WARN_ON.
	 */
	if (!state->saved_p)
		pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

	/* Set flag */
	state->in_queue = true;
}

static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
				   struct kvmppc_xive_src_block *sb,
				   u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/* Mask and save state, this will also sync HW queues */
	state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

	/* Transfer P and Q */
	state->saved_p = state->old_p;
	state->saved_q = state->old_q;

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
				     struct kvmppc_xive_src_block *sb,
				     u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/*
	 * Lock / exclude EOI (not technically necessary if the
	 * guest isn't running concurrently. If this becomes a
	 * performance issue we can probably remove the lock.
	 */
	xive_lock_for_unmask(sb, state);

	/* Restore mask/prio if it wasn't masked */
	if (state->saved_scan_prio != MASKED)
		xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
	u32 idx = q->idx;
	u32 toggle = q->toggle;
	u32 irq;

	do {
		irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
		if (irq > XICS_IPI)
			xive_pre_save_set_queued(xive, irq);
	} while (irq);
}

static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
	struct kvm_vcpu *vcpu = NULL;
	int i, j;

	/*
	 * See comment in xive_get_source() about how this
	 * work. Collect a stable state for all interrupts
	 */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_mask_irq(xive, sb, j);
	}

	/* Then scan the queues and update the "in_queue" flag */
	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		if (!xc)
			continue;
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			if (xc->queues[j].qpage)
				xive_pre_save_queue(xive, &xc->queues[j]);
		}
	}

	/* Finally restore interrupt states */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_unmask_irq(xive, sb, j);
	}
}

static void xive_post_save_scan(struct kvmppc_xive *xive)
{
	u32 i, j;

	/* Clear all the in_queue flags */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			sb->irq_state[j].in_queue = false;
	}

	/* Next get_source() will do a new scan */
	xive->saved_src_count = 0;
}
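
/*
 * This returns the source configuration and state to user space.
 */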
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val, prio;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[idx];

	if (!state->valid)
		return -ENOENT;

	pr_devel("get_source(%ld)...\n", irq);

	/*
	 * So to properly save the state into something that looks like a
	 * XICS migration stream we cannot treat interrupts individually.
	 *
	 * We need, instead, mask them all (& save their previous PQ state)
	 * to get a stable state in the HW, then sync them to ensure that
	 * any interrupt that had already fired hits its queue, and finally
	 * scan all the queues to collect which interrupts are still present
	 * in the queues, so we can set the "pending" flag on them and
	 * they can be resent on restore.
	 *
	 * So we do it all when the "first" interrupt gets saved, all the
	 * state is collected at that point, the rest of xive_get_source()
	 * will merely collect and convert that state to the expected
	 * userspace bit mask.
	 */
	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);
	xive->saved_src_count++;

	/* Convert saved state into something compatible with xics */
	val = state->act_server;
	prio = state->saved_scan_prio;

	if (prio == MASKED) {
		val |= KVM_XICS_MASKED;
		prio = state->saved_priority;
	}
	val |= prio << KVM_XICS_PRIORITY_SHIFT;
	if (state->lsi) {
		val |= KVM_XICS_LEVEL_SENSITIVE;
		if (state->saved_p)
			val |= KVM_XICS_PENDING;
	} else {
		if (state->saved_p)
			val |= KVM_XICS_PRESENTED;

		if (state->saved_q)
			val |= KVM_XICS_QUEUED;

		/*
		 * We mark it pending (which will attempt a re-delivery)
		 * if we are in a queue *or* we were masked and had
		 * Q set which is equivalent to the XICS "masked pending"
		 * state
		 */
		if (state->in_queue || (prio == MASKED && state->saved_q))
			val |= KVM_XICS_PENDING;
	}

	/*
	 * If that was the last interrupt saved, reset the
	 * in_queue flags
	 */
	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);

	/* Copy the result to userspace */
	if (put_user(val, ubufp))
		return -EFAULT;

	return 0;
}

struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
	struct kvmppc_xive *xive, int irq)
{
	struct kvmppc_xive_src_block *sb;
	int i, bid;

	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&xive->lock);

	/* block already exists - somebody else got here first */
	if (xive->src_blocks[bid])
		goto out;

	/* Create it */
	sb = kzalloc(sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto out;

	sb->id = bid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
		sb->irq_state[i].eisn = 0;
		sb->irq_state[i].guest_priority = MASKED;
		sb->irq_state[i].saved_priority = MASKED;
		sb->irq_state[i].act_priority = MASKED;
	}
	smp_wmb();
	xive->src_blocks[bid] = sb;

	if (bid > xive->max_sbid)
		xive->max_sbid = bid;

out:
	mutex_unlock(&xive->lock);
	return xive->src_blocks[bid];
}

static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		if (xc->delayed_irq == irq) {
			xc->delayed_irq = 0;
			xive->delayed_irqs--;
			return true;
		}
	}
	return false;
}

static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 act_prio, guest_prio;
	u32 server;
	int rc = 0;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	pr_devel("set_source(irq=0x%lx)\n", irq);

	/* Find the source */
	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_devel("No source, creating source block...\n");
		sb = kvmppc_xive_create_src_block(xive, irq);
		if (!sb) {
			pr_devel("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	/* Read user */
	if (get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't we
	 * leave the interrupt untargetted. It means that an interrupt
	 * can become "untargetted" across migration if it was masked
	 * by set_xive() but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargetted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertized to a running guest yet
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targetting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel(" Found delayed ! forcing PENDING !\n");
	}

	/* Cleanup the SW state */
	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel(" LSI ! Asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force Q and !P, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
	 */
	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);

	/*
	 * If the interrupt was unmasked, update guest priority and
	 * perform the appropriate state transition and do a
	 * re-trigger if necessary.
	 */
	if (val & KVM_XICS_MASKED) {
		pr_devel(" masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	return 0;
}

int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	/* Perform locklessly .... (we need to do some RCUisms here...) */
	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = true;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = false;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);

	return 0;
}

int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
{
	u32 __user *ubufp = (u32 __user *) addr;
	u32 nr_servers;
	int rc = 0;

	if (get_user(nr_servers, ubufp))
		return -EFAULT;

	pr_devel("%s nr_servers=%u\n", __func__, nr_servers);

	if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID)
		return -EINVAL;

	mutex_lock(&xive->lock);
	if (xive->vp_base != XIVE_INVALID_VP)
		/* The VP block is allocated once and freed when the device
		 * is released. Better not allow to change its size since its
		 * used by connect_vcpu to validate vCPU ids are valid (eg,
		 * setting it back to a higher value could allow connect_vcpu
		 * to come up with a VP id that goes beyond the VP block, which
		 * is likely to cause a crash in OPAL).
		 */
		rc = -EBUSY;
	else if (nr_servers > KVM_MAX_VCPUS)
		/* We don't need more servers. Higher vCPU ids get packed
		 * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id().
		 */
		xive->nr_servers = KVM_MAX_VCPUS;
	else
		xive->nr_servers = nr_servers;

	mutex_unlock(&xive->lock);

	return rc;
}

static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	case KVM_DEV_XICS_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XICS_NR_SERVERS:
			return kvmppc_xive_set_nr_servers(xive, attr->addr);
		}
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	case KVM_DEV_XICS_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XICS_NR_SERVERS:
			return 0;
		}
	}
	return -ENXIO;
}

static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
}

void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_cleanup_irq_data(&state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}
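
/*
 * Called when device fd is closed.  kvm->lock is held.
 */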
static void kvmppc_xive_release(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	int i;

	pr_devel("Releasing xive device\n");

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd referring to the
	 * device.  Therefore there can not be any of the device
	 * attribute set/get functions being executed concurrently,
	 * and similarly, the connect_vcpu and set/clr_mapped
	 * functions also cannot be being executed.
	 */

	debugfs_remove(xive->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
		 * Holding the vcpu->mutex also means that the vcpu cannot
		 * be executing the KVM_RUN ioctl, and therefore it cannot
		 * be executing the XIVE push or pull code or accessing
		 * the XIVE MMIO regions.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xive_cleanup_vcpu(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	/*
	 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
	 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
	 * against xive code getting called during vcpu execution or
	 * set/get one_reg operations.
	 */
	kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	/*
	 * A reference of the kvmppc_xive pointer is now kept under
	 * the xive_devices struct of the machine for reuse. It is
	 * freed when the VM is destroyed for now until we fix all the
	 * execution paths.
	 */
	kfree(dev);
}
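
/*
 * When the guest chooses the interrupt mode (XICS legacy or XIVE
 * native), the VM will switch of KVM device. The previous device will
 * be "released" before the new one is created.
 *
 * Until we are sure all execution paths are well protected, provide a
 * fail safe (transitional) method for device destruction, in which
 * the XIVE device pointer is recycled and not directly freed.
 */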
struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
{
	struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
		&kvm->arch.xive_devices.native :
		&kvm->arch.xive_devices.xics_on_xive;
	struct kvmppc_xive *xive = *kvm_xive_device;

	if (!xive) {
		xive = kzalloc(sizeof(*xive), GFP_KERNEL);
		*kvm_xive_device = xive;
	} else {
		memset(xive, 0, sizeof(*xive));
	}

	return xive;
}
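
/*
 * Create a XICS device with XIVE backend.  kvm->lock is held.
 */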
static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;

	pr_devel("Creating xive for partition\n");

	/* Already there ? */
	if (kvm->arch.xive)
		return -EEXIST;

	xive = kvmppc_xive_get_device(kvm, type);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;
	mutex_init(&xive->lock);

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* VP allocation is delayed to the first call to connect_vcpu */
	xive->vp_base = XIVE_INVALID_VP;
	/* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per sockets
	 * on a POWER9 system.
	 */
	xive->nr_servers = KVM_MAX_VCPUS;

	xive->single_escalation = xive_native_has_single_escalation();

	kvm->arch.xive = xive;
	return 0;
}

int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/* The VM should have configured XICS mode before doing XICS hcalls. */
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;

	switch (req) {
	case H_XIRR:
		return xive_vm_h_xirr(vcpu);
	case H_CPPR:
		return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_EOI:
		return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_IPI:
		return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
					  kvmppc_get_gpr(vcpu, 5));
	case H_IPOLL:
		return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_XIRR_X:
		xive_vm_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
		return H_SUCCESS;
	}

	return H_UNSUPPORTED;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);

int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	unsigned int i;

	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];
		u32 i0, i1, idx;

		if (!q->qpage && !xc->esc_virq[i])
			continue;

		if (q->qpage) {
			seq_printf(m, "    q[%d]: ", i);
			idx = q->idx;
			i0 = be32_to_cpup(q->qpage + idx);
			idx = (idx + 1) & q->msk;
			i1 = be32_to_cpup(q->qpage + idx);
			seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
				   i0, i1);
		}
		if (xc->esc_virq[i]) {
			struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
			struct xive_irq_data *xd =
				irq_data_get_irq_handler_data(d);
			u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

			seq_printf(m, "    ESC %d %c%c EOI @%llx",
				   xc->esc_virq[i],
				   (pq & XIVE_ESB_VAL_P) ? 'P' : '-',
				   (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-',
				   xd->eoi_page);
			seq_puts(m, "\n");
		}
	}
	return 0;
}

void kvmppc_xive_debug_show_sources(struct seq_file *m,
				    struct kvmppc_xive_src_block *sb)
{
	int i;

	seq_puts(m, "    LISN      HW/CHIP   TYPE    PQ      EISN    CPU/PRIO\n");
	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
		struct xive_irq_data *xd;
		u64 pq;
		u32 hw_num;

		if (!state->valid)
			continue;

		kvmppc_xive_select_irq(state, &hw_num, &xd);

		pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

		seq_printf(m, "%08x  %08x/%02x", state->number, hw_num,
			   xd->src_chip);
		if (state->lsi)
			seq_printf(m, " %cLSI", state->asserted ? '^' : ' ');
		else
			seq_puts(m, "  MSI");

		seq_printf(m, " %s  %c%c  %08x   % 4d/%d",
			   state->ipi_number == hw_num ? "IPI" : " PT",
			   pq & XIVE_ESB_VAL_P ? 'P' : '-',
			   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
			   state->eisn, state->act_server,
			   state->act_priority);

		seq_puts(m, "\n");
	}
}

static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_puts(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		seq_printf(m, "VCPU %d: VP:%#x/%02x\n"
			   "    CPPR:%#x HWCPPR:%#x MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->vp_id, xc->vp_chip_id,
			   xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);

		kvmppc_xive_debug_show_queues(m, vcpu);

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_puts(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	seq_puts(m, "=========\nSources\n=========\n");

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_debug_show_sources(m, sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(xive_debug);

static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.release = kvmppc_xive_release,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};