// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <linux/irqdomain.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed".
 *
 * We still instantiate the template here because we use some of
 * the generated utility functions in this file as well.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"
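
/*
 * Illustrative expansion of the template macros above (a sketch,
 * assuming the template pastes names together with a GLUE()-style
 * macro):
 *
 *	X_STATIC unsigned long GLUE(X_PFX, h_xirr)(struct kvm_vcpu *vcpu)
 *
 * becomes
 *
 *	static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu)
 *
 * These xive_vm_* handlers are dispatched from kvmppc_xive_xics_hcall()
 * near the bottom of this file.
 */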

/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and an additional safety guard.
 */
#define XIVE_Q_GAP	2

static bool kvmppc_xive_vcpu_has_save_restore(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	/* Check enablement at VP level */
	return xc->vp_cam & TM_QW1W2_HO;
}

bool kvmppc_xive_check_save_restore(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;

	if (xive->flags & KVMPPC_XIVE_FLAG_SAVE_RESTORE)
		return kvmppc_xive_vcpu_has_save_restore(vcpu);

	return true;
}

/*
 * Push a vcpu's context to the XIVE on guest entry.
 * This assumes we are in virtual mode (MMU on)
 */
void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
{
	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
	u64 pq;

	/*
	 * Nothing to do if the platform doesn't have a XIVE
	 * or this vCPU doesn't have its own XIVE context
	 * (e.g. because it's not using an in-kernel interrupt controller).
	 */
	if (!tima || !vcpu->arch.xive_cam_word)
		return;

	eieio();
	if (!kvmppc_xive_vcpu_has_save_restore(vcpu))
		__raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
	__raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
	vcpu->arch.xive_pushed = 1;
	eieio();

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	vcpu->arch.irq_pending = 0;

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	if (vcpu->arch.xive_esc_on) {
		pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
						  XIVE_ESB_SET_PQ_01));
		mb();

		/*
		 * We have a possible subtle race here: The escalation
		 * interrupt might have fired and be on its way to the
		 * host queue while we mask it. If we unmask it again
		 * early enough (re-cede right away), there is a
		 * theoretical possibility that it fires again, thus
		 * landing in the target queue more than once which is
		 * a big no-no.
		 *
		 * Fortunately, solving this is rather easy. If the
		 * above load setting PQ to 01 returns a previous
		 * value where P is set, it means the escalation
		 * interrupt is still in the queue and we will take it
		 * next time we enter the guest, so leaving xive_esc_on
		 * set is safe as the interrupt is guaranteed to be
		 * fired only once.
		 *
		 * Otherwise, if the escalation interrupt hasn't fired
		 * yet, we can mask it and clear the flag ourselves
		 * since it's not in any queue.
		 */
		if (!(pq & XIVE_ESB_VAL_P))
			/* Now P is 0, we can clear the flag */
			vcpu->arch.xive_esc_on = 0;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);

/*
 * Pull a vcpu's context from the XIVE on guest exit.
 * This assumes we are in virtual mode (MMU on)
 */
void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
{
	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;

	if (!vcpu->arch.xive_pushed)
		return;

	/*
	 * Should not have been pushed if there is no tima
	 */
	if (WARN_ON(!tima))
		return;

	eieio();
	/* First load to pull the context, we ignore the value */
	__raw_readl(tima + TM_SPC_PULL_OS_CTX);
	/* Second load to recover the context state (Words 0 and 1) */
	if (!kvmppc_xive_vcpu_has_save_restore(vcpu))
		vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);

	/* Fixup some of the state for the next load */
	vcpu->arch.xive_saved_state.lsmfb = 0;
	vcpu->arch.xive_saved_state.ack = 0xff;
	vcpu->arch.xive_pushed = 0;
	eieio();
}
EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);

void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
{
	void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;

	if (!esc_vaddr)
		return;

	/* we are using XIVE with single escalation */

	if (vcpu->arch.xive_esc_on) {
		/*
		 * If we still have a pending escalation, abort the cede,
		 * and we must set PQ to 10 rather than 00 so that we don't
		 * potentially end up with two entries for the escalation
		 * interrupt in the XIVE interrupt queue.  In that case
		 * we also don't want to set xive_esc_on to 1 here in
		 * case we race with xive_esc_irq().
		 */
		vcpu->arch.ceded = 0;
		/*
		 * The escalation interrupts are special as we don't EOI them.
		 * There is no need to use the load-after-store ordering offset
		 * to set PQ to 10 as we won't use StoreEOI.
		 */
		__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
	} else {
		vcpu->arch.xive_esc_on = true;
		mb();
		__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
	}
	mb();
}
EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}

static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	/* Since we have the no-EOI flag, the interrupt is effectively
	 * disabled now. Clearing xive_esc_on means we won't bother
	 * doing so on the next entry.
	 *
	 * This also allows the entry code to know that if a PQ combination
	 * of 10 is observed while xive_esc_on is true, it means the queue
	 * contains an unprocessed escalation interrupt. We don't make use
	 * of that knowledge today but might (see comment in
	 * book3s_hv_rmhandlers.S)
	 */
	vcpu->arch.xive_esc_on = false;

	/* This orders xive_esc_on = false vs. subsequent stale_p = true */
	smp_wmb();	/* goes with smp_mb() in cleanup_single_escalation */

	return IRQ_HANDLED;
}

int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
				  bool single_escalation)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	if (single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;

	/* In single escalation mode, we grab the ESB MMIO of the
	 * interrupt and mask it. Also populate the VCPU v/raddr
	 * of the ESB page for use by asm entry/exit code. Finally
	 * set the XIVE_IRQ_FLAG_NO_EOI flag which will prevent the
	 * core code from performing an EOI on the escalation
	 * interrupt, thus leaving it effectively masked after
	 * it fires once.
	 */
	if (single_escalation) {
		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
		vcpu->arch.xive_esc_raddr = xd->eoi_page;
		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
		xd->flags |= XIVE_IRQ_FLAG_NO_EOI;
	}

	return 0;
error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}

static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	void *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve infos on current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * corresponding queue 0 entries
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}

/* Called with xive->lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&xive->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations if needed */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !kvmppc_xive_has_single_escalation(xive))
			kvmppc_xive_attach_escalation(vcpu, prio,
						      kvmppc_xive_has_single_escalation(xive));
		if (rc)
			return rc;
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
	return 0;
}

static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}

static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue. */
	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}

int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	/* Try pick it */
	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel(" found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel(" no available target !\n");

	/* No available target ! */
	return -EBUSY;
}

static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/* Set PQ to 10, return old P and old Q and remember them */
	val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
	state->old_p = !!(val & XIVE_ESB_VAL_P);
	state->old_q = !!(val & XIVE_ESB_VAL_Q);

	/*
	 * Synchronize hardware to send all pending interrupts to the
	 * queue.
	 */
	xive_native_sync_source(hw_num);

	return old_prio;
}

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	/*
	 * Take the lock, try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto bail;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI
	 * on the source. This will handle the cases where
	 * FW EOI is needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
}
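
/*
 * A worked example of the mask/unmask PQ dance above (a sketch of the
 * ESB state transitions, not extra runtime code):
 *
 *	PQ = 00 (enabled, idle)
 *	xive_lock_and_mask():  load ESB @ SET_PQ_10 -> PQ = 10,
 *	                       old_p = 0, old_q = 0
 *	IRQ fires while masked:                        PQ = 11
 *	xive_finish_unmask():  old_q clear, old_p clear, so we do a
 *	                       source EOI which replays the latched
 *	                       event (Q->P->Trigger) and leaves PQ = 00
 *
 * Had old_p been set instead, we would leave PQ alone and wait for the
 * guest's H_EOI, since a queue entry may still be outstanding.
 */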

/*
 * Target an interrupt to a given server/prio, this will fallback
 * to another server if necessary and perform the HW targetting
 * updates as needed. Assumes the state lock is held.
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = kvmppc_xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);
	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 kvmppc_xive_vp(xive, server),
					 prio, state->number);
}

/*
 * Targetting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 *  - Unless it was never enabled (or we run out of capacity)
 *    an interrupt is always targetted at a valid server/queue
 *    pair even when "masked" by the guest. This pair tends to
 *    be the last one used but it can be changed under some
 *    circumstances. That allows us to separate targetting
 *    issues from masking issues.
 *
 *  - When masking, we set PQ to 10 and save the previous value
 *    of P and Q.
 *
 *  - When unmasking, if saved Q was set, we set PQ to 11
 *    otherwise we leave PQ to the HW state which will be either
 *    10 if nothing happened or 11 if the interrupt fired while
 *    masked. Effectively we are OR'ing the previous Q into the
 *    HW Q.
 *
 *    Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *    which will unmask the interrupt and shoot a new one if Q was
 *    set.
 *
 *    Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *    effectively meaning an H_EOI from the guest is still expected
 *    for that interrupt).
 *
 *  - If H_EOI occurs while masked, we clear the saved P.
 *
 *  - When changing target, we account on the new target and
 *    increment a separate "pending" counter on the old one.
 *    This pending counter will be used to decrement the old
 *    target's count when its queue has been observed empty.
 */
int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED) {
		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm,
			      xive_prio_from_guest(priority));
		mutex_unlock(&xive->lock);
	}
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs, we'll handle
	 * targetting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask() will not actually unmask, this will
	 * be done later by xive_finish_unmask() once the targetting
	 * has been done, so we don't try to unmask an interrupt
	 * that hasn't yet been targetted.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);

	/*
	 * Then we should perform targetting. First calculate the new
	 * act_priority
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything.
	 *
	 * The condition for re-targetting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 *       also ff, we don't do anything and leave the interrupt
	 *       untargetted. An attempt of doing an int_on on an
	 *       untargetted interrupt will fail. If that is a problem
	 *       we could initialize interrupts with valid defaults.
	 */
	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally update saved_priority to match. Only int_on/off
	 * set this field to a different value.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if interrupt was not targetted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargetted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	/*
	 * Lock and mask
	 */
	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}

static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return false;
	state = &sb->irq_state[idx];
	if (!state->valid)
		return false;

	/*
	 * Trigger the IPI. This assumes we never restore a pass-through
	 * interrupt which should be safe enough
	 */
	xive_irq_trigger(&state->ipi_data);

	return true;
}

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	if (!xc)
		return 0;

	/* Return the per-cpu state for state saving/migration */
	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}

int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 cppr, mfrr;
	u32 xisr;

	if (!xc || !xive)
		return -ENOENT;

	/* Grab individual state fields. We don't use pending_pri */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	/*
	 * We can't update the state of a "pushed" VCPU, but that
	 * shouldn't happen because the vcpu->mutex makes running a
	 * vcpu mutually exclusive with doing one_reg get/set on it.
	 */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EIO;

	/* Update VCPU HW saved state */
	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	/*
	 * Update MFRR state. If it's not 0xff, we mark the VCPU as
	 * having a pending MFRR change, which will re-evaluate the
	 * target. The VCPU will thus potentially get a spurious
	 * interrupt but that's not a big deal.
	 */
	xc->mfrr = mfrr;
	if (mfrr < cppr)
		xive_irq_trigger(&xc->vp_ipi_data);

	/*
	 * Now saved XIRR is "interesting". It means there's something in
	 * the legacy "1 element" queue... for an IPI we simply ignore it,
	 * as the MFRR restore will handle that. For anything else we need
	 * to force a resend of the source.
	 * However the source may not have been setup yet. If that's the
	 * case, we keep that info and increment a counter in the xive to
	 * tell subsequent xive_set_source() to go look.
	 */
	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel(" xisr restore delayed\n");
	}

	return 0;
}
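
/*
 * For context: user space drives the two functions above through the
 * KVM_REG_PPC_ICP_STATE one_reg. A minimal sketch of the save/restore
 * path (hypothetical user-space code, error handling omitted):
 *
 *	uint64_t icp;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_ICP_STATE,
 *		.addr = (uintptr_t)&icp,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// kvmppc_xive_get_icp()
 *	// ... migrate ...
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// kvmppc_xive_set_icp()
 */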

int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   unsigned long host_irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data =
		irq_domain_get_irq_data(irq_get_default_host(), host_irq);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_debug("%s: GIRQ 0x%lx host IRQ %ld XIVE HW IRQ 0x%x\n",
		 __func__, guest_irq, host_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mark the passed-through interrupt as going to a VCPU,
	 * this will prevent further EOIs and similar operations
	 * from the XIVE code. It will also mask the interrupt
	 * to either PQ=10 or 11 state, the latter if the interrupt
	 * is pending. This will allow us to unmask or retrigger it
	 * after the transfer.
	 *
	 * The remainder of this function then swaps the guest source
	 * from the internal IPI over to the real HW interrupt.
	 */
	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for host IRQ %ld\n", host_irq);
		return rc;
	}

	/*
	 * Mask and read state of IPI. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/* Turn the IPI hard off */
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	/*
	 * Reset ESB guest mapping. Needed when ESB pages are exposed
	 * to the guest in XIVE native mode
	 */
	if (xive->ops && xive->ops->reset_mapped)
		xive->ops->reset_mapped(kvm, guest_irq);

	/* Grab info about irq */
	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	/*
	 * Configure the IRQ to match the existing configuration of
	 * the IPI if it was already targetted. Otherwise this will
	 * mask the interrupt in a lossy way (act_priority is 0xff)
	 * which is fine for a never started interrupt.
	 */
	xive_native_configure_irq(hw_irq,
				  kvmppc_xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * We do an EOI to enable the interrupt (and retrigger if needed)
	 * if the guest has the interrupt unmasked and the P bit was *not*
	 * set in the IPI. If it was set, we know a slot may still be in
	 * use in the target queue thus we have to wait for a guest
	 * originated EOI
	 */
	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	/* Clear the P bit on the IPI so it caches without it */
	state->old_p = state->old_q = false;

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   unsigned long host_irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_debug("%s: GIRQ 0x%lx host IRQ %ld\n", __func__, guest_irq, host_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mask and read state of IRQ. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/*
	 * If old_p is set, the interrupt is pending, we switch it to
	 * PQ=11. This will force a resend in the host so the interrupt
	 * isn't lost to whatever host driver may pick it up
	 */
	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	/* Release the passed-through interrupt to the host */
	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for host IRQ %ld\n", host_irq);
		return rc;
	}

	/* Forget about the IRQ */
	state->pt_number = 0;
	state->pt_data = NULL;

	/*
	 * Reset ESB guest mapping. Needed when ESB pages are exposed
	 * to the guest in XIVE native mode
	 */
	if (xive->ops && xive->ops->reset_mapped) {
		xive->ops->reset_mapped(kvm, guest_irq);
	}

	/* Reconfigure the IPI */
	xive_native_configure_irq(state->ipi_number,
				  kvmppc_xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * If old_p is set (we have a queue entry potentially
	 * occupied) or the interrupt is masked, we set the IPI
	 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
	 */
	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (!state->valid)
				continue;
			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			/* Clean it up */
			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
		}
	}

	/* Disable vcpu's escalation interrupt */
	if (vcpu->arch.xive_esc_on) {
		__raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
					     XIVE_ESB_SET_PQ_01));
		vcpu->arch.xive_esc_on = false;
	}

	/*
	 * Clear pointers to escalation interrupt ESB.
	 * This is safe because the vcpu->mutex is held, preventing
	 * any other CPU from concurrently executing a KVM_RUN ioctl.
	 */
	vcpu->arch.xive_esc_vaddr = 0;
	vcpu->arch.xive_esc_raddr = 0;
}

/*
 * In single escalation mode, the escalation interrupt is marked so
 * that EOI doesn't re-enable it, but just sets the stale_p flag to
 * indicate that the P bit has already been dealt with.  However, the
 * assembly code that enters the guest sets PQ to 00 without clearing
 * stale_p (because it has no easy way to address it).  Hence we have
 * to adjust stale_p before shutting down the interrupt.
 */
void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
				    struct kvmppc_xive_vcpu *xc, int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * This slightly odd sequence gives the right result
	 * (i.e. stale_p set if xive_esc_on is false) even if
	 * we race with xive_esc_irq() and xive_irq_eoi().
	 */
	xd->stale_p = false;
	smp_mb();		/* paired with smp_wmb in xive_esc_irq */
	if (!vcpu->arch.xive_esc_on)
		xd->stale_p = true;
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	int i;

	if (!kvmppc_xics_enabled(vcpu))
		return;

	if (!xc)
		return;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Mask the VP IPI */
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	/* Free escalations */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		if (xc->esc_virq[i]) {
			if (kvmppc_xive_has_single_escalation(xc->xive))
				xive_cleanup_single_escalation(vcpu, xc,
							xc->esc_virq[i]);
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}
	}

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Clear the cam word so guest entry won't try to push context */
	vcpu->arch.xive_cam_word = 0;

	/* Free the queues */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		xive_native_disable_queue(xc->vp_id, q, i);
		if (q->qpage) {
			free_pages((unsigned long)q->qpage,
				   xive->q_page_order);
			q->qpage = NULL;
		}
	}

	/* Free the IPI */
	if (xc->vp_ipi) {
		xive_cleanup_irq_data(&xc->vp_ipi_data);
		xive_native_free_irq(xc->vp_ipi);
	}

	/* Free the VP */
	kfree(xc);

	/* Cleanup the vcpu */
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
	vcpu->arch.xive_vcpu = NULL;
}

static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
{
	/* We have a block of xive->nr_servers VPs. We just need to check
	 * packed vCPU ids are below that.
	 */
	return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
}

int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
{
	u32 vp_id;

	if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}

	if (xive->vp_base == XIVE_INVALID_VP) {
		xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
		pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);

		if (xive->vp_base == XIVE_INVALID_VP)
			return -ENOSPC;
	}

	vp_id = kvmppc_xive_vp(xive, cpu);
	if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}

	*vp = vp_id;

	return 0;
}

int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			     struct kvm_vcpu *vcpu, u32 cpu)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc;
	int i, r = -EBUSY;
	u32 vp_id;

	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

	if (dev->ops != &kvm_xive_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;

	/* We need to synchronize with queue provisioning */
	mutex_lock(&xive->lock);

	r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
	if (r)
		goto bail;

	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc) {
		r = -ENOMEM;
		goto bail;
	}

	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = cpu;
	xc->vp_id = vp_id;
	xc->mfrr = 0xff;
	xc->valid = true;

	r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (r)
		goto bail;

	if (!kvmppc_xive_check_save_restore(vcpu)) {
		pr_err("inconsistent save-restore setup for VCPU %d\n", cpu);
		r = -EIO;
		goto bail;
	}

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* Allocate IPI */
	xc->vp_ipi = xive_native_alloc_irq();
	if (!xc->vp_ipi) {
		pr_err("Failed to allocate xive irq for VCPU IPI\n");
		r = -EIO;
		goto bail;
	}
	pr_devel(" IPI=0x%x\n", xc->vp_ipi);

	r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
	if (r)
		goto bail;

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect escalation interrupts numbering
	 */
	r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive));
	if (r) {
		pr_err("Failed to enable VP in OPAL, err %d\n", r);
		goto bail;
	}

	/*
	 * Initialize queues. Initially we set them all for no queueing
	 * and we enable escalation for queue 0 only which we'll use for
	 * our mfrr change notifications. If the VCPU is hot-plugged, we
	 * do handle provisioning however based on the existing "map"
	 * of enabled queues.
	 */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Single escalation, no queue 7 */
		if (i == 7 && kvmppc_xive_has_single_escalation(xive))
			break;

		/* Is queue already enabled ? Provision it */
		if (xive->qmap & (1 << i)) {
			r = xive_provision_queue(vcpu, i);
			if (r == 0 && !kvmppc_xive_has_single_escalation(xive))
				kvmppc_xive_attach_escalation(
					vcpu, i, kvmppc_xive_has_single_escalation(xive));
			if (r)
				goto bail;
		} else {
			r = xive_native_configure_queue(xc->vp_id,
							q, i, NULL, 0, true);
			if (r) {
				pr_err("Failed to configure queue %d for VCPU %d\n",
				       i, cpu);
				goto bail;
			}
		}
	}

	/* If not done above, attach priority 0 escalation */
	r = kvmppc_xive_attach_escalation(vcpu, 0, kvmppc_xive_has_single_escalation(xive));
	if (r)
		goto bail;

	/* Route the IPI */
	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
	if (!r)
		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
	mutex_unlock(&xive->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
	}

	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
	return 0;
}
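
/*
 * For context: this is reached from the KVM_ENABLE_CAP(KVM_CAP_IRQ_XICS)
 * vcpu ioctl, roughly (hypothetical user-space sketch):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_IRQ_XICS,
 *		.args = { xics_device_fd, server_num },
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * kvmppc_xive_connect_vcpu() is then called with cpu = server_num.
 */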

/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return;

	state = &sb->irq_state[idx];

	/* Some sanity checking */
	if (!state->valid) {
		pr_err("invalid irq 0x%x in cpu queue!\n", irq);
		return;
	}

	/*
	 * If the interrupt is in a queue it should have P set.
	 * We warn so that it gets reported. A backtrace isn't
	 * useful so no need to use a WARN_ON.
	 */
	if (!state->saved_p)
		pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

	/* Set flag */
	state->in_queue = true;
}

static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
				   struct kvmppc_xive_src_block *sb,
				   u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/* Mask and save state, this will also sync HW queues */
	state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

	/* Transfer P and Q */
	state->saved_p = state->old_p;
	state->saved_q = state->old_q;

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
				     struct kvmppc_xive_src_block *sb,
				     u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/*
	 * Lock / exclude EOI (not technically necessary if the
	 * guest isn't running concurrently. If this becomes a
	 * performance issue we can probably remove the lock.)
	 */
	xive_lock_for_unmask(sb, state);

	/* Restore mask/prio if it wasn't masked */
	if (state->saved_scan_prio != MASKED)
		xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
	u32 idx = q->idx;
	u32 toggle = q->toggle;
	u32 irq;

	do {
		irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
		if (irq > XICS_IPI)
			xive_pre_save_set_queued(xive, irq);
	} while (irq);
}

static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
	struct kvm_vcpu *vcpu = NULL;
	int i, j;

	/*
	 * See comment in xive_get_source() about how this
	 * work. Collect a stable state for all interrupts
	 */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_mask_irq(xive, sb, j);
	}

	/* Then scan the queues */
	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		if (!xc)
			continue;
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			if (xc->queues[j].qpage)
				xive_pre_save_queue(xive, &xc->queues[j]);
		}
	}

	/* Finally restore interrupt states */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_unmask_irq(xive, sb, j);
	}
}

static void xive_post_save_scan(struct kvmppc_xive *xive)
{
	u32 i, j;

	/* Clear all the in_queue flags */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			sb->irq_state[j].in_queue = false;
	}

	/* Next get_source() will do a new scan */
	xive->saved_src_count = 0;
}

/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val, prio;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[idx];

	if (!state->valid)
		return -ENOENT;

	pr_devel("get_source(%ld)...\n", irq);

	/*
	 * So to properly save the state into something that looks like a
	 * XICS migration stream we cannot treat interrupts individually.
	 *
	 * We need, instead, to mask them all (and save their previous PQ
	 * state) to get a stable state in the HW, then sync them to ensure
	 * that any interrupt that had already fired hits its queue, and
	 * finally scan all the queues to collect which interrupts are
	 * still present in the queues, so we can set the "pending" flag
	 * on them and they can be resent on restore.
	 *
	 * So we do it all when the "first" interrupt gets saved, all the
	 * state is collected at that point, the rest of xive_get_source()
	 * will merely collect and convert that state to the expected
	 * userspace bit mask.
	 */
	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);
	xive->saved_src_count++;

	/* Convert saved state into something compatible with xics */
	val = state->act_server;
	prio = state->saved_scan_prio;

	if (prio == MASKED) {
		val |= KVM_XICS_MASKED;
		prio = state->saved_priority;
	}
	val |= prio << KVM_XICS_PRIORITY_SHIFT;
	if (state->lsi) {
		val |= KVM_XICS_LEVEL_SENSITIVE;
		if (state->saved_p)
			val |= KVM_XICS_PENDING;
	} else {
		if (state->saved_p)
			val |= KVM_XICS_PRESENTED;

		if (state->saved_q)
			val |= KVM_XICS_QUEUED;

		/*
		 * We mark it pending (which will attempt a re-delivery)
		 * if we are in a queue *or* if we were masked and had
		 * Q set which is equivalent to the XICS "masked pending"
		 * state
		 */
		if (state->in_queue || (prio == MASKED && state->saved_q))
			val |= KVM_XICS_PENDING;
	}

	/*
	 * If that was the last interrupt saved, reset the
	 * in_queue flags
	 */
	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);

	/* Copy the result to userspace */
	if (put_user(val, ubufp))
		return -EFAULT;

	return 0;
}

struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
	struct kvmppc_xive *xive, int irq)
{
	struct kvmppc_xive_src_block *sb;
	int i, bid;

	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&xive->lock);

	/* block already exists - somebody else got here first */
	if (xive->src_blocks[bid])
		goto out;

	/* Create the ICS */
	sb = kzalloc(sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto out;

	sb->id = bid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
		sb->irq_state[i].eisn = 0;
		sb->irq_state[i].guest_priority = MASKED;
		sb->irq_state[i].saved_priority = MASKED;
		sb->irq_state[i].act_priority = MASKED;
	}
	smp_wmb();
	xive->src_blocks[bid] = sb;

	if (bid > xive->max_sbid)
		xive->max_sbid = bid;

out:
	mutex_unlock(&xive->lock);
	return xive->src_blocks[bid];
}

static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		if (xc->delayed_irq == irq) {
			xc->delayed_irq = 0;
			xive->delayed_irqs--;
			return true;
		}
	}
	return false;
}

static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 act_prio, guest_prio;
	u32 server;
	int rc = 0;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	pr_devel("set_source(irq=0x%lx)\n", irq);

	/* Find the source */
	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_devel("No source, creating source block...\n");
		sb = kvmppc_xive_create_src_block(xive, irq);
		if (!sb) {
			pr_devel("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	/* Read user passed data */
	if (get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't we
	 * leave the interrupt untargetted. It means that an interrupt
	 * can become "untargetted" across migration if it was masked
	 * by set_xive() but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargetted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertized to a running guest yet
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targetting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel(" Found delayed ! forcing PENDING !\n");
	}

	/* Cleanup the SW state */
	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel(" LSI ! Asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force Q and !P, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
	 */
	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);

	/*
	 * If the interrupt was unmasked, update guest priority and
	 * perform the appropriate state transition and do a
	 * re-trigger if necessary.
	 */
	if (val & KVM_XICS_MASKED) {
		pr_devel(" masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	return 0;
}
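
/*
 * The u64 source value decoded above packs, per the XICS device ABI
 * (see arch/powerpc/include/uapi/asm/kvm.h):
 *
 *	bits  0-31 : server (KVM_XICS_DESTINATION_MASK)
 *	bits 32-39 : priority (KVM_XICS_PRIORITY_SHIFT)
 *	plus flags : KVM_XICS_MASKED, KVM_XICS_LEVEL_SENSITIVE,
 *		     KVM_XICS_PENDING, KVM_XICS_PRESENTED, KVM_XICS_QUEUED
 *
 * For example, an unmasked edge interrupt routed to server 2 at guest
 * priority 5 is restored with val = (5ULL << KVM_XICS_PRIORITY_SHIFT) | 2.
 */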

int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	/* Perform locklessly .... (we need to do some RCUisms) */
	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = true;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = false;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);

	return 0;
}

int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
{
	u32 __user *ubufp = (u32 __user *) addr;
	u32 nr_servers;
	int rc = 0;

	if (get_user(nr_servers, ubufp))
		return -EFAULT;

	pr_devel("%s nr_servers=%u\n", __func__, nr_servers);

	if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID)
		return -EINVAL;

	mutex_lock(&xive->lock);
	if (xive->vp_base != XIVE_INVALID_VP)
		/* The VP block is allocated once and freed when the device
		 * is released. Better not allow to change its size since its
		 * used by connect_vcpu to validate vCPU ids are valid (eg,
		 * setting it back to a higher value could allow connect_vcpu
		 * to come up with a VP id that goes beyond the VP block, which
		 * is likely to cause a crash in OPAL).
		 */
		rc = -EBUSY;
	else if (nr_servers > KVM_MAX_VCPUS)
		/* We don't need more servers. Higher vCPU ids get packed
		 * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id().
		 */
		xive->nr_servers = KVM_MAX_VCPUS;
	else
		xive->nr_servers = nr_servers;

	mutex_unlock(&xive->lock);

	return rc;
}
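
/*
 * For context: user space sizes the VP block before creating any vCPU,
 * along these lines (hypothetical sketch using the standard KVM device
 * attribute ioctl):
 *
 *	uint32_t nr_servers = smp_cpus;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_CTRL,
 *		.attr  = KVM_DEV_XICS_NR_SERVERS,
 *		.addr  = (uintptr_t)&nr_servers,
 *	};
 *	ioctl(xics_device_fd, KVM_SET_DEVICE_ATTR, &attr);
 */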

static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	case KVM_DEV_XICS_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XICS_NR_SERVERS:
			return kvmppc_xive_set_nr_servers(xive, attr->addr);
		}
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	case KVM_DEV_XICS_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XICS_NR_SERVERS:
			return 0;
		}
	}
	return -ENXIO;
}

static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
}

void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_cleanup_irq_data(&state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too but keep IRQ hw data */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}

/*
 * Called when device fd is closed. kvm->lock is held.
 */
static void kvmppc_xive_release(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	int i;

	pr_devel("Releasing xive device\n");

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd referring to the
	 * device. Therefore there can not be any of the device
	 * attribute set/get functions being executed concurrently,
	 * and similarly, the connect_vcpu and set/clr_mapped
	 * functions also cannot be being executed.
	 */

	debugfs_remove(xive->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
		 * Holding the vcpu->mutex also means that the vcpu cannot
		 * be executing the KVM_RUN ioctl, and therefore it cannot
		 * be executing the XIVE push or pull code or accessing
		 * the XIVE MMIO regions.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xive_cleanup_vcpu(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	/*
	 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
	 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
	 * against xive code getting called during vcpu execution or
	 * set/get one_reg operations.
	 */
	kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	/*
	 * A reference to the kvmppc_xive pointer is now kept under
	 * the xive_devices struct of the machine for reuse. It is
	 * freed when the VM is destroyed, for now, until we fix all
	 * the execution paths.
	 */
	kfree(dev);
}

/*
 * When the guest is rebooted or shut down, the XIVE device is
 * released by userspace and shortly afterwards re-created, but the
 * vCPU structures may still reference the previous device. We
 * therefore keep one kvmppc_xive structure per device type in the VM
 * and hand it back (cleared) on the next create instead of freeing it
 * at release time.
 */
struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
{
	struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
		&kvm->arch.xive_devices.native :
		&kvm->arch.xive_devices.xics_on_xive;
	struct kvmppc_xive *xive = *kvm_xive_device;

	if (!xive) {
		xive = kzalloc(sizeof(*xive), GFP_KERNEL);
		*kvm_xive_device = xive;
	} else {
		memset(xive, 0, sizeof(*xive));
	}

	return xive;
}

/*
 * Create a XICS device with XIVE backend. kvm->lock is held.
 */
static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;

	pr_devel("Creating xive for partition\n");

	/* Already there ? */
	if (kvm->arch.xive)
		return -EEXIST;

	xive = kvmppc_xive_get_device(kvm, type);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;
	mutex_init(&xive->lock);

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* VP allocation is delayed to the first call to connect_vcpu */
	xive->vp_base = XIVE_INVALID_VP;
	/* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per sockets
	 * on a POWER9 system.
	 */
	xive->nr_servers = KVM_MAX_VCPUS;

	if (xive_native_has_single_escalation())
		xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;

	if (xive_native_has_save_restore())
		xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE;

	kvm->arch.xive = xive;
	return 0;
}
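
/*
 * Queue sizing example for the computation above: with an EQ shift of
 * 16 (a 64KB queue, i.e. 16k 4-byte entries) and 4KB pages, q_order = 16
 * and q_page_order = 16 - 12 = 4, so each provisioned queue allocates
 * 2^4 = 16 contiguous pages.
 */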

int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/* The VM should have configured XICS mode before doing XICS hcalls. */
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;

	switch (req) {
	case H_XIRR:
		return xive_vm_h_xirr(vcpu);
	case H_CPPR:
		return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_EOI:
		return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_IPI:
		return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				     kvmppc_get_gpr(vcpu, 5));
	case H_IPOLL:
		return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_XIRR_X:
		xive_vm_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
		return H_SUCCESS;
	}

	return H_UNSUPPORTED;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);

int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	unsigned int i;

	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];
		u32 i0, i1, idx;

		if (!q->qpage && !xc->esc_virq[i])
			continue;

		if (q->qpage) {
			seq_printf(m, " q[%d]: ", i);
			idx = q->idx;
			i0 = be32_to_cpup(q->qpage + idx);
			idx = (idx + 1) & q->msk;
			i1 = be32_to_cpup(q->qpage + idx);
			seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
				   i0, i1);
		}
		if (xc->esc_virq[i]) {
			struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
			struct xive_irq_data *xd =
				irq_data_get_irq_handler_data(d);
			u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

			seq_printf(m, " ESC %d %c%c EOI @%llx",
				   xc->esc_virq[i],
				   (pq & XIVE_ESB_VAL_P) ? 'P' : '-',
				   (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-',
				   xd->eoi_page);
			seq_puts(m, "\n");
		}
	}
	return 0;
}

void kvmppc_xive_debug_show_sources(struct seq_file *m,
				    struct kvmppc_xive_src_block *sb)
{
	int i;

	seq_puts(m, "    LISN      HW/CHIP   TYPE    PQ      EISN    CPU/PRIO\n");
	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
		struct xive_irq_data *xd;
		u64 pq;
		u32 hw_num;

		if (!state->valid)
			continue;

		kvmppc_xive_select_irq(state, &hw_num, &xd);

		pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

		seq_printf(m, "%08x  %08x/%02x", state->number, hw_num,
			   xd->src_chip);
		if (state->lsi)
			seq_printf(m, " %cLSI", state->asserted ? '^' : ' ');
		else
			seq_puts(m, "  MSI");

		seq_printf(m, " %s  %c%c  %08x  % 4d/%d",
			   state->ipi_number == hw_num ? "IPI" : " PT",
			   pq & XIVE_ESB_VAL_P ? 'P' : '-',
			   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
			   state->eisn, state->act_server,
			   state->act_priority);

		seq_puts(m, "\n");
	}
}

static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_puts(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		seq_printf(m, "VCPU %d: VP:%#x/%02x\n"
			   " CPPR:%#x HWCPPR:%#x MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->vp_id, xc->vp_chip_id,
			   xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);

		kvmppc_xive_debug_show_queues(m, vcpu);

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_puts(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	seq_puts(m, "=========\nSources\n=========\n");

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_debug_show_sources(m, sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(xive_debug);

static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, arch_debugfs_dir,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.release = kvmppc_xive_release,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};