/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debugfs.h>
#include <asm/time.h>

#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

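/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */
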
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend);

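/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */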
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;
	u32 pq_old, pq_new;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
		level = 1;
	else if (level == KVM_INTERRUPT_UNSET)
		level = 0;
	/*
	 * Take other values the same as 1, consistent with original code.
	 * maybe WARN here?
	 */

	if (!state->lsi && level == 0) /* noop for MSI */
		return 0;

	do {
		pq_old = state->pq_state;
		if (state->lsi) {
			if (level) {
				if (pq_old & PQ_PRESENTED)
					/* Setting an already-set LSI is a noop */
					return 0;

				pq_new = PQ_PRESENTED;
			} else
				pq_new = 0;
		} else
			pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Test P=1, Q=0, this is the only case where we present */
	if (pq_new == PQ_PRESENTED)
		icp_deliver_irq(xics, NULL, irq, false);

	/* Record which CPU this arrived on for passed-through interrupts */
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	return 0;
}

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];
		if (state->resend) {
			XICS_DBG("resend %#x prio %#x\n", state->number,
				 state->priority);
			icp_deliver_irq(xics, icp, state->number, true);
		}
	}
}

static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD        - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);

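	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_XPPR).
	 */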
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Pairs with the smp_wmb() before set_bit() in icp_deliver_irq() */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			  new_state.mfrr > priority &&
			  new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

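	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */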
 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	if (check_resend)
		if (!state->resend)
			goto out;

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

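	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */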
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

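	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */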
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			check_resend = 0;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		state->resend = 1;

		/*
		 * Make sure when checking resend, we don't miss the resend
		 * if resend_map bit is seen and cleared.
		 */
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			check_resend = 0;
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

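	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * in PAPR. Instead, if the MFRR is now more favored than
	 * both the new CPPR and the pending interrupt, we load
	 * an IPI into the XISR directly from the state snapshot.
	 */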
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

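	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, set the CPPR to the pending priority
	 * and clear the XISR.
	 */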
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

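	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject.  If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */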
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject of a pending interrupt */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);

	/* Handle resends */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

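	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */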
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

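	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */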
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);
}

static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

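	/*
	 * ICS EOI handling: For LSI, if P bit is still set, we need to
	 * resend it.
	 *
	 * For MSI, we move Q bit into P (and clear Q). If it is set,
	 * resend it.
	 */
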
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	if (state->lsi)
		pq_new = state->pq_state;
	else
		do {
			pq_old = state->pq_state;
			pq_new = pq_old >> 1;
		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	if (pq_new & PQ_PRESENTED)
		icp_deliver_irq(xics, icp, irq, false);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 irq = xirr & 0x00ffffff;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

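	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, EOI is not supposed to be used for
	 * that.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */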
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;

	return ics_eoi(vcpu, irq);
}

int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);

/* -- Initialisation code etc. -- */

static void xics_debugfs_irqmap(struct seq_file *m,
				struct kvmppc_passthru_irqmap *pimap)
{
	int i;

	if (!pimap)
		return;
	seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
		   pimap->n_mapped);
	for (i = 0; i < pimap->n_mapped; i++) {
		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
			   pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
	}
}

static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_check_resend = 0;
	t_reject = 0;

	xics_debugfs_irqmap(m, kvm->arch.pimap);

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
		   t_rm_kick_vcpu, t_rm_check_resend,
		   t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu reject=%lu\n",
		   t_check_resend, t_reject);
	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->pq_state,
				   irq->resend, irq->masked_pending);

		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.open = xics_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

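	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected.  We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter).  We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */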
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}

static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			if (irqp->pq_state & PQ_PRESENTED)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;

		if (irqp->pq_state & PQ_PRESENTED)
			val |= KVM_XICS_PRESENTED;

		if (irqp->pq_state & PQ_QUEUED)
			val |= KVM_XICS_QUEUED;

		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->pq_state = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE)
		irqp->lsi = 1;
	/* If PENDING, set P in case P is not saved because of old code */
	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
		irqp->pq_state |= PQ_PRESENTED;
	if (val & KVM_XICS_QUEUED)
		irqp->pq_state |= PQ_QUEUED;
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number, false);

	return 0;
}

int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (!xics)
		return -ENODEV;
	return ics_deliver_irq(xics, irq, level);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xics_free(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;

	debugfs_remove(xics->dentry);

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
	kfree(dev);
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;

	/* Already there ? */
	if (kvm->arch.xics)
		ret = -EEXIST;
	else
		kvm->arch.xics = xics;

	if (ret) {
		kfree(xics);
		return ret;
	}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
	    cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	return 0;
}

static void kvmppc_xics_init(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;

	xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.destroy = kvmppc_xics_free,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};

int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = host_irq;
	ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);