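/*
 * In-kernel XICS interrupt controller emulation for Book3S KVM:
 * models the ICS (source controller) and per-vcpu ICP (presentation
 * controller) and implements the XICS hypercalls (H_XIRR, H_CPPR,
 * H_IPI, H_IPOLL, H_EOI).
 */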
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debugfs.h>
#include <asm/time.h>

#include <linux/seq_file.h>

#include "book3s_xics.h"

/*
 * Debug tracing is compiled out by default; flip the #if to route
 * XICS_DBG() through trace_printk().
 */
#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

/* Defaults for the real-mode XICS backend (see kvmppc_xics_create()) */
#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false
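
/*
 * Locking:
 *
 * Each ICS is protected by ics->lock, an arch_spinlock_t taken with
 * local interrupts disabled; it guards the per-source irq_state array.
 *
 * The whole ICP state fits in one 64-bit word (union kvmppc_icp_state),
 * so ICP updates are done lock-free as single cmpxchg64 transactions
 * in icp_try_update().
 */
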
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend);

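/*
 * Deliver an interrupt to the ICS. The P (presented) and Q (queued)
 * bits of pq_state track the source: for an MSI the old P bit shifts
 * into Q before P is set, so an interrupt arriving while another is
 * still in flight is remembered in Q; for an LSI only P is used. The
 * interrupt is pushed to an ICP only on the transition to P=1, Q=0.
 */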
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;
	u32 pq_old, pq_new;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
		level = 1;
	else if (level == KVM_INTERRUPT_UNSET)
		level = 0;
	/* Any other value is treated the same as 1 (asserted) */

	/* Deasserting an edge (MSI) source is a no-op */
	if (!state->lsi && level == 0)
		return 0;

	do {
		pq_old = state->pq_state;
		if (state->lsi) {
			if (level) {
				if (pq_old & PQ_PRESENTED)
					/* Already presented, nothing to do */
					return 0;

				pq_new = PQ_PRESENTED;
			} else
				pq_new = 0;
		} else
			/* MSI: shift P into Q, then set P */
			pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Test P=1, Q=0: the only case in which we present the interrupt */
	if (pq_new == PQ_PRESENTED)
		icp_deliver_irq(xics, NULL, irq, false);

	/* Record which CPU this arrived on for passed-through interrupts */
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	return 0;
}

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];
		if (state->resend) {
			XICS_DBG("resend %#x prio %#x\n", state->number,
				 state->priority);
			icp_deliver_irq(xics, icp, state->number, true);
		}
	}
}

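/*
 * Update server/priority for a source under the ICS lock. Returns true
 * if the caller should now attempt a delivery, i.e. when the source
 * was pending while masked, or had a pending resend, and is no longer
 * masked.
 */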
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

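/*
 * Attempt a single atomic update of the ICP state, recomputing the EE
 * (external exception) output as part of the same transaction. If the
 * update succeeds and the new state asserts EE, an external interrupt
 * is queued to the vcpu, which is kicked unless we are updating our
 * own ICP.
 */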
static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);

	/*
	 * Check for output state update.
	 *
	 * This is racy against concurrent updaters, which is why the
	 * interrupt output is only ever set here, never cleared: the
	 * clear happens prior to an update and only by the owning
	 * processor itself (in H_XIRR and when raising the CPPR).
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order the read of resend_map vs. the per-source resend flags */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

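/*
 * Try to present an interrupt to the ICP as a single atomic
 * transaction. On success, *reject returns any interrupt that was
 * displaced from XISR; on failure, need_resend is set so that a later
 * CPPR change triggers a new delivery attempt.
 */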
static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			  new_state.mfrr > priority &&
			  new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver, set need_resend so a
			 * subsequent CPPR state change causes us to try
			 * a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

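/*
 * Deliver (or re-deliver after a rejection) an interrupt to an ICP.
 * Rejection is racy vs. resends: the rejecting ICP transaction is
 * already complete by the time we get here, so the ICP may be able to
 * accept the interrupt again; a reject therefore simply becomes a
 * fresh delivery attempt. The interrupt may also have been retargeted
 * in the meantime, so the ICS/ICP lookup is redone on each pass.
 */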
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	/* On a resend, only deliver if the resend flag is still set */
	if (check_resend)
		if (!state->resend)
			goto out;

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, mark it pending and bail out. This way an
	 * interrupt that was rejected and then masked is not lost:
	 * it is re-delivered when the source is unmasked.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery; on failure this sets need_resend in the ICP
	 * as part of the atomic transaction. On success, the delivery
	 * may itself have rejected an interrupt that was "delivered"
	 * before we took the ICS lock, in which case we redo the whole
	 * sequence for the rejected one: by the time we get here the
	 * target processor may already have consumed and completed its
	 * interrupt, so the rejected one might be acceptable again.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/* Delivery was successful, did we reject somebody else ? */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			check_resend = 0;
			goto again;
		}
	} else {
		/*
		 * Delivery failed: set the resend map bit and mark the
		 * ICS state as needing a resend.
		 */
		state->resend = 1;

		/*
		 * Make sure when checking resend, we don't miss the
		 * resend if the resend_map bit is seen and cleared.
		 */
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

		/*
		 * If need_resend got cleared in the ICP some time between
		 * the icp_try_to_deliver() atomic update and now, it may
		 * have missed the resend_map bit, so retry the delivery.
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			check_resend = 0;
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

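/*
 * Lower the CPPR (ICP state: Down_CPPR). If the MFRR is now more
 * favored than both the new CPPR and any pending interrupt, present
 * an IPI (a simplified form of PAPR's Check_IPI). Finally latch and
 * clear need_resend, and notify the ICSes of a potential resend
 * outside of the atomic update.
 */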
static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * We cannot have a pending interrupt trumped by an IPI
		 * at this point, so either the pending interrupt is
		 * already an IPI (which we don't care to override) or
		 * it is more favored than us or non-existent.
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Resend checks are asynchronous to the ICP state update, so
	 * we can handle them separately here.
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, set the CPPR to the pending priority
	 * and clear the XISR.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

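/*
 * H_IPI: write the MFRR of the target server (ICP state: Set_MFRR).
 * If the new MFRR is more favored than the CPPR, it may displace a
 * pending interrupt (Check_IPI); the displaced interrupt is rejected,
 * i.e. re-delivered through icp_deliver_irq(). PAPR does not describe
 * it, but when the MFRR is made less favored, a previously rejected
 * interrupt may need to be resent, so a resend is triggered whenever
 * the MFRR value increases.
 */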
static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

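/*
 * H_CPPR (ICP state: Set_CPPR). The new value can safely be compared
 * with the current one outside of the transaction, as the CPPR is
 * only ever changed by the processor itself. Lowering the CPPR is
 * handled by icp_down_cppr(); raising it (Up_CPPR) may reject the
 * pending interrupt, which is then re-delivered (Reject_Current).
 */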
static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * Up_CPPR: we can remove EE from the current processor, the
	 * update transaction will set it again if needed.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects: they are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);
}

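/*
 * EOI a source at the ICS level. For an LSI, the P bit is left alone:
 * if it is still set, the interrupt is re-presented. For an MSI, the
 * Q bit is shifted into P (and cleared); if P is then set, the queued
 * interrupt is presented.
 */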
static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	if (state->lsi)
		pq_new = state->pq_state;
	else
		do {
			pq_old = state->pq_state;
			pq_new = pq_old >> 1;
		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	if (pq_new & PQ_PRESENTED)
		icp_deliver_irq(xics, icp, irq, false);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 irq = xirr & 0x00ffffff;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: if EOI is incorrectly used by software to lower the
	 * CPPR value (i.e. make it more favored), we do not check for
	 * rejection of a pending interrupt; EOI is not supposed to be
	 * used for that. The ICS-side EOI is handled after the CPPR
	 * update (Down_CPPR, shared with H_CPPR).
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI at the ICS level */
	if (irq == XICS_IPI)
		return H_SUCCESS;

	return ics_eoi(vcpu, irq);
}

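/*
 * Complete a hypercall that the real-mode handler could not finish.
 * The real-mode code records what is left to do in icp->rm_action;
 * here we perform those actions (vcpu kicks, resend checks, EOI
 * notifications) in virtual mode.
 */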
int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Complete work the real-mode handler has left for us */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);


/* -- Debugfs and initialisation -- */

static void xics_debugfs_irqmap(struct seq_file *m,
				struct kvmppc_passthru_irqmap *pimap)
{
	int i;

	if (!pimap)
		return;
	seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
		   pimap->n_mapped);
	for (i = 0; i < pimap->n_mapped; i++) {
		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
			   pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
	}
}

static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_check_resend = 0;
	t_reject = 0;

	xics_debugfs_irqmap(m, kvm->arch.pimap);

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
		   t_rm_kick_vcpu, t_rm_check_resend,
		   t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu reject=%lu\n",
		   t_check_resend, t_reject);
	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->pq_state,
				   irq->resend, irq->masked_pending);
		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.open = xics_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

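/*
 * Look up or lazily allocate the ICS that covers @irq. All sources in
 * a new ICS start out masked. The smp_wmb() orders initialisation of
 * the irq_state array before the ICS is published in xics->ics[],
 * which lockless readers access without taking kvm->lock.
 */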
static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

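/*
 * Restore ICP state from the one-reg encoding above. The new state
 * must be internally consistent: XISR == 0 requires the idle pending
 * priority (0xff); a pending IPI must match the MFRR and be more
 * favored than the CPPR; any other pending source must exist and be
 * more favored than both the MFRR and the CPPR.
 */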
int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request;
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * Note that an interrupt displaced from old_state.xisr is not
	 * marked as rejected; userspace is expected to set the source
	 * states to be consistent with the ICP states. We do handle
	 * resends due to the MFRR becoming less favored, which is
	 * needed to reach a consistent state when userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}

static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			if (irqp->pq_state & PQ_PRESENTED)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;

		if (irqp->pq_state & PQ_PRESENTED)
			val |= KVM_XICS_PRESENTED;

		if (irqp->pq_state & PQ_QUEUED)
			val |= KVM_XICS_QUEUED;

		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}

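/*
 * Restore one source from the KVM_DEV_XICS_GRP_SOURCES encoding:
 * destination server and priority plus the MASKED / LEVEL_SENSITIVE /
 * PRESENTED / QUEUED / PENDING flags. The ICS is allocated on first
 * use; a pending interrupt is re-delivered once the state is written.
 */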
static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->pq_state = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE)
		irqp->lsi = 1;

	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
		irqp->pq_state |= PQ_PRESENTED;
	if (val & KVM_XICS_QUEUED)
		irqp->pq_state |= PQ_QUEUED;
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number, false);

	return 0;
}

int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (!xics)
		return -ENODEV;
	return ics_deliver_irq(xics, irq, level);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xics_free(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;

	debugfs_remove(xics->dentry);

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
	kfree(dev);
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;

	/* Already there ? */
	if (kvm->arch.xics)
		ret = -EEXIST;
	else
		kvm->arch.xics = xics;

	if (ret) {
		kfree(xics);
		return ret;
	}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/* Enable real-mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif

	return 0;
}

static void kvmppc_xics_init(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;

	xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.destroy = kvmppc_xics_free,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};

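/*
 * Illustrative only (not part of this file): a userspace VMM drives
 * the device above via KVM_CREATE_DEVICE and per-source device
 * attributes. A minimal sketch, assuming a VM fd in "vmfd" and an
 * interrupt number in "irq":
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
 *	ioctl(vmfd, KVM_CREATE_DEVICE, &cd);	// cd.fd is the device fd
 *
 *	__u64 src;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq,
 *		.addr  = (__u64)(unsigned long)&src,
 *	};
 *	ioctl(cd.fd, KVM_GET_DEVICE_ATTR, &attr);	// save one source
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);	// restore it
 */
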
int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = host_irq;
	ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);