/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Event channels
 * are mapped onto Linux irqs dynamically and are exposed to the rest of
 * the kernel through Xen irq_chip implementations, so a received event
 * is translated into an irq and delivered through the normal interrupt
 * processing path.
 *
 * Four kinds of event can be bound to an event channel:
 *
 * 1. Inter-domain notifications, which includes all the virtual device
 *    events, since they are driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - hardware interrupts.
 */
24#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
25
26#include <linux/linkage.h>
27#include <linux/interrupt.h>
28#include <linux/irq.h>
29#include <linux/module.h>
30#include <linux/string.h>
31#include <linux/bootmem.h>
32#include <linux/slab.h>
33#include <linux/irqnr.h>
34#include <linux/pci.h>
35
36#ifdef CONFIG_X86
37#include <asm/desc.h>
38#include <asm/ptrace.h>
39#include <asm/irq.h>
40#include <asm/idle.h>
41#include <asm/io_apic.h>
42#include <asm/xen/page.h>
43#include <asm/xen/pci.h>
44#endif
45#include <asm/sync_bitops.h>
46#include <asm/xen/hypercall.h>
47#include <asm/xen/hypervisor.h>
48
49#include <xen/xen.h>
50#include <xen/hvm.h>
51#include <xen/xen-ops.h>
52#include <xen/events.h>
53#include <xen/interface/xen.h>
54#include <xen/interface/event_channel.h>
55#include <xen/interface/hvm/hvm_op.h>
56#include <xen/interface/hvm/params.h>
57#include <xen/interface/physdev.h>
58#include <xen/interface/sched.h>
59#include <asm/hw_irq.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays.  The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type specific information:
 *    PIRQ - physical IRQ, GSI, flags, and owner domain
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
95struct irq_info {
96 struct list_head list;
97 int refcnt;
98 enum xen_irq_type type;
99 unsigned irq;
100 unsigned short evtchn;
101 unsigned short cpu;
102
103 union {
104 unsigned short virq;
105 enum ipi_vector ipi;
106 struct {
107 unsigned short pirq;
108 unsigned short gsi;
109 unsigned char flags;
110 uint16_t domid;
111 } pirq;
112 } u;
113};
114#define PIRQ_NEEDS_EOI (1 << 0)
115#define PIRQ_SHAREABLE (1 << 1)
116
117static int *evtchn_to_irq;
118#ifdef CONFIG_X86
119static unsigned long *pirq_eoi_map;
120#endif
121static bool (*pirq_needs_eoi)(unsigned irq);

/*
 * Note that sizeof(xen_ulong_t) can be more than sizeof(unsigned long).
 * Be careful to only use bitops which allow for this (e.g.
 * test_bit/find_first_bit and friends but not __ffs) and to pass
 * BITS_PER_EVTCHN_WORD as the bitmask length.
 */
#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
/*
 * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
 * array.  Primarily to avoid long lines (hence the terse name).
 */
#define BM(x) (unsigned long *)(x)
/* Find the first set bit in an evtchn mask. */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)

static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
		      cpu_evtchn_mask);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)
143
144static struct irq_chip xen_dynamic_chip;
145static struct irq_chip xen_percpu_chip;
146static struct irq_chip xen_pirq_chip;
147static void enable_dynirq(struct irq_data *data);
148static void disable_dynirq(struct irq_data *data);

/* Get info for IRQ */
151static struct irq_info *info_for_irq(unsigned irq)
152{
153 return irq_get_handler_data(irq);
154}

/* Constructors for packed IRQ information. */
157static void xen_irq_info_common_init(struct irq_info *info,
158 unsigned irq,
159 enum xen_irq_type type,
160 unsigned short evtchn,
161 unsigned short cpu)
162{
163
164 BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
165
166 info->type = type;
167 info->irq = irq;
168 info->evtchn = evtchn;
169 info->cpu = cpu;
170
171 evtchn_to_irq[evtchn] = irq;
172
173 irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
174}
175
176static void xen_irq_info_evtchn_init(unsigned irq,
177 unsigned short evtchn)
178{
179 struct irq_info *info = info_for_irq(irq);
180
181 xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
182}
183
184static void xen_irq_info_ipi_init(unsigned cpu,
185 unsigned irq,
186 unsigned short evtchn,
187 enum ipi_vector ipi)
188{
189 struct irq_info *info = info_for_irq(irq);
190
191 xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
192
193 info->u.ipi = ipi;
194
195 per_cpu(ipi_to_irq, cpu)[ipi] = irq;
196}
197
198static void xen_irq_info_virq_init(unsigned cpu,
199 unsigned irq,
200 unsigned short evtchn,
201 unsigned short virq)
202{
203 struct irq_info *info = info_for_irq(irq);
204
205 xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
206
207 info->u.virq = virq;
208
209 per_cpu(virq_to_irq, cpu)[virq] = irq;
210}
211
212static void xen_irq_info_pirq_init(unsigned irq,
213 unsigned short evtchn,
214 unsigned short pirq,
215 unsigned short gsi,
216 uint16_t domid,
217 unsigned char flags)
218{
219 struct irq_info *info = info_for_irq(irq);
220
221 xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
222
223 info->u.pirq.pirq = pirq;
224 info->u.pirq.gsi = gsi;
225 info->u.pirq.domid = domid;
226 info->u.pirq.flags = flags;
227}

/*
 * Accessors for packed IRQ information.
 */
232static unsigned int evtchn_from_irq(unsigned irq)
233{
234 if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
235 return 0;
236
237 return info_for_irq(irq)->evtchn;
238}
239
240unsigned irq_from_evtchn(unsigned int evtchn)
241{
242 return evtchn_to_irq[evtchn];
243}
244EXPORT_SYMBOL_GPL(irq_from_evtchn);
245
246static enum ipi_vector ipi_from_irq(unsigned irq)
247{
248 struct irq_info *info = info_for_irq(irq);
249
250 BUG_ON(info == NULL);
251 BUG_ON(info->type != IRQT_IPI);
252
253 return info->u.ipi;
254}
255
256static unsigned virq_from_irq(unsigned irq)
257{
258 struct irq_info *info = info_for_irq(irq);
259
260 BUG_ON(info == NULL);
261 BUG_ON(info->type != IRQT_VIRQ);
262
263 return info->u.virq;
264}
265
266static unsigned pirq_from_irq(unsigned irq)
267{
268 struct irq_info *info = info_for_irq(irq);
269
270 BUG_ON(info == NULL);
271 BUG_ON(info->type != IRQT_PIRQ);
272
273 return info->u.pirq.pirq;
274}
275
276static enum xen_irq_type type_from_irq(unsigned irq)
277{
278 return info_for_irq(irq)->type;
279}
280
281static unsigned cpu_from_irq(unsigned irq)
282{
283 return info_for_irq(irq)->cpu;
284}
285
286static unsigned int cpu_from_evtchn(unsigned int evtchn)
287{
288 int irq = evtchn_to_irq[evtchn];
289 unsigned ret = 0;
290
291 if (irq != -1)
292 ret = cpu_from_irq(irq);
293
294 return ret;
295}
296
297#ifdef CONFIG_X86
298static bool pirq_check_eoi_map(unsigned irq)
299{
300 return test_bit(pirq_from_irq(irq), pirq_eoi_map);
301}
302#endif
303
304static bool pirq_needs_eoi_flag(unsigned irq)
305{
306 struct irq_info *info = info_for_irq(irq);
307 BUG_ON(info->type != IRQT_PIRQ);
308
309 return info->u.pirq.flags & PIRQ_NEEDS_EOI;
310}
311
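/*
 * Events that are pending in @sh, routed to @cpu (per cpu_evtchn_mask)
 * and not globally masked, for one word of the 2-level bitmap.
 */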
312static inline xen_ulong_t active_evtchns(unsigned int cpu,
313 struct shared_info *sh,
314 unsigned int idx)
315{
316 return sh->evtchn_pending[idx] &
317 per_cpu(cpu_evtchn_mask, cpu)[idx] &
318 ~sh->evtchn_mask[idx];
319}
320
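/*
 * Record that event channel @chn is delivered to @cpu: update the irq
 * affinity mask, the per-cpu event masks and the cached cpu in irq_info.
 */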
321static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
322{
323 int irq = evtchn_to_irq[chn];
324
325 BUG_ON(irq == -1);
326#ifdef CONFIG_SMP
327 cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
328#endif
329
330 clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))));
331 set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu)));
332
333 info_for_irq(irq)->cpu = cpu;
334}
335
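/* Reset all event-channel -> CPU bindings to CPU 0 (boot and resume). */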
336static void init_evtchn_cpu_bindings(void)
337{
338 int i;
339#ifdef CONFIG_SMP
340 struct irq_info *info;
341
342
343 list_for_each_entry(info, &xen_irq_list_head, list) {
344 struct irq_desc *desc = irq_to_desc(info->irq);
345 cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
346 }
347#endif
348
349 for_each_possible_cpu(i)
350 memset(per_cpu(cpu_evtchn_mask, i),
351 (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
352}
353
354static inline void clear_evtchn(int port)
355{
356 struct shared_info *s = HYPERVISOR_shared_info;
357 sync_clear_bit(port, BM(&s->evtchn_pending[0]));
358}
359
360static inline void set_evtchn(int port)
361{
362 struct shared_info *s = HYPERVISOR_shared_info;
363 sync_set_bit(port, BM(&s->evtchn_pending[0]));
364}
365
366static inline int test_evtchn(int port)
367{
368 struct shared_info *s = HYPERVISOR_shared_info;
369 return sync_test_bit(port, BM(&s->evtchn_pending[0]));
370}

/*
 * Notify the remote end of the event channel bound to @irq that an
 * event is pending.  No-op if the irq has no event channel bound.
 */
381void notify_remote_via_irq(int irq)
382{
383 int evtchn = evtchn_from_irq(irq);
384
385 if (VALID_EVTCHN(evtchn))
386 notify_remote_via_evtchn(evtchn);
387}
388EXPORT_SYMBOL_GPL(notify_remote_via_irq);
389
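/* Mask an event channel in the shared info page. */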
390static void mask_evtchn(int port)
391{
392 struct shared_info *s = HYPERVISOR_shared_info;
393 sync_set_bit(port, BM(&s->evtchn_mask[0]));
394}
395
396static void unmask_evtchn(int port)
397{
398 struct shared_info *s = HYPERVISOR_shared_info;
399 unsigned int cpu = get_cpu();
400 int do_hypercall = 0, evtchn_pending = 0;
401
402 BUG_ON(!irqs_disabled());
403
404 if (unlikely((cpu != cpu_from_evtchn(port))))
405 do_hypercall = 1;
406 else {
		/*
		 * Need to clear the mask before checking pending to
		 * avoid a race with an event becoming pending.
		 *
		 * EVTCHNOP_unmask will only trigger an upcall if the
		 * mask bit was set, so if a hypercall turns out to be
		 * needed the event must be remasked first.
		 */
415 sync_clear_bit(port, BM(&s->evtchn_mask[0]));
416 evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
417
418 if (unlikely(evtchn_pending && xen_hvm_domain())) {
419 sync_set_bit(port, BM(&s->evtchn_mask[0]));
420 do_hypercall = 1;
421 }
422 }

	/*
	 * Slow path (hypercall): either the event is bound to a different
	 * CPU, or we are an HVM guest with the event already pending, so
	 * let Xen perform the unmask and (re)deliver the event.
	 */
427 if (do_hypercall) {
428 struct evtchn_unmask unmask = { .port = port };
429 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
430 } else {
431 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'.  Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
438 if (evtchn_pending &&
439 !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
440 BM(&vcpu_info->evtchn_pending_sel)))
441 vcpu_info->evtchn_upcall_pending = 1;
442 }
443
444 put_cpu();
445}
446
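/* Allocate and attach the Xen-specific per-IRQ bookkeeping (struct irq_info). */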
447static void xen_irq_init(unsigned irq)
448{
449 struct irq_info *info;
450#ifdef CONFIG_SMP
451 struct irq_desc *desc = irq_to_desc(irq);
452
453
454 cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
455#endif
456
457 info = kzalloc(sizeof(*info), GFP_KERNEL);
458 if (info == NULL)
459 panic("Unable to allocate metadata for IRQ%d\n", irq);
460
461 info->type = IRQT_UNBOUND;
462 info->refcnt = -1;
463
464 irq_set_handler_data(irq, info);
465
466 list_add_tail(&info->list, &xen_irq_list_head);
467}
468
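/* Allocate an irq (and its irq_info) from the dynamic range. */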
469static int __must_check xen_allocate_irq_dynamic(void)
470{
471 int first = 0;
472 int irq;
473
474#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0, which see "real" (emulated or
	 * actual, respectively) GSIs, we allocate dynamic IRQs
	 * (e.g. those corresponding to event channels or MSIs)
	 * from the range above those "real" GSIs to avoid
	 * collisions.
	 */
482 if (xen_initial_domain() || xen_hvm_domain())
483 first = get_nr_irqs_gsi();
484#endif
485
486 irq = irq_alloc_desc_from(first, -1);
487
488 if (irq >= 0)
489 xen_irq_init(irq);
490
491 return irq;
492}
493
494static int __must_check xen_allocate_irq_gsi(unsigned gsi)
495{
496 int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs).  Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
504 if (xen_pv_domain() && !xen_initial_domain())
505 return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
508 if (gsi < NR_IRQS_LEGACY)
509 irq = gsi;
510 else
511 irq = irq_alloc_desc_at(gsi, -1);
512
513 xen_irq_init(irq);
514
515 return irq;
516}
517
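/* Release the irq_info and, for non-legacy irqs, the irq descriptor. */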
518static void xen_free_irq(unsigned irq)
519{
520 struct irq_info *info = irq_get_handler_data(irq);
521
522 if (WARN_ON(!info))
523 return;
524
525 list_del(&info->list);
526
527 irq_set_handler_data(irq, NULL);
528
529 WARN_ON(info->refcnt > 0);
530
531 kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
534 if (irq < NR_IRQS_LEGACY)
535 return;
536
537 irq_free_desc(irq);
538}
539
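/*
 * Ask Xen whether this pirq needs an explicit EOI and cache the answer
 * in the PIRQ_NEEDS_EOI flag.
 */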
540static void pirq_query_unmask(int irq)
541{
542 struct physdev_irq_status_query irq_status;
543 struct irq_info *info = info_for_irq(irq);
544
545 BUG_ON(info->type != IRQT_PIRQ);
546
547 irq_status.irq = pirq_from_irq(irq);
548 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
549 irq_status.flags = 0;
550
551 info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
552 if (irq_status.flags & XENIRQSTAT_needs_eoi)
553 info->u.pirq.flags |= PIRQ_NEEDS_EOI;
554}
555
556static bool probing_irq(int irq)
557{
558 struct irq_desc *desc = irq_to_desc(irq);
559
560 return desc && desc->action == NULL;
561}
562
563static void eoi_pirq(struct irq_data *data)
564{
565 int evtchn = evtchn_from_irq(data->irq);
566 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
567 int rc = 0;
568
569 irq_move_irq(data);
570
571 if (VALID_EVTCHN(evtchn))
572 clear_evtchn(evtchn);
573
574 if (pirq_needs_eoi(data->irq)) {
575 rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
576 WARN_ON(rc);
577 }
578}
579
580static void mask_ack_pirq(struct irq_data *data)
581{
582 disable_dynirq(data);
583 eoi_pirq(data);
584}
585
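/*
 * Bind the pirq behind @irq to a Xen event channel, route it to CPU 0
 * and unmask it.  A failed bind is logged (unless the irq is only being
 * probed) and the irq is simply left without an event channel.
 */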
586static unsigned int __startup_pirq(unsigned int irq)
587{
588 struct evtchn_bind_pirq bind_pirq;
589 struct irq_info *info = info_for_irq(irq);
590 int evtchn = evtchn_from_irq(irq);
591 int rc;
592
593 BUG_ON(info->type != IRQT_PIRQ);
594
595 if (VALID_EVTCHN(evtchn))
596 goto out;
597
598 bind_pirq.pirq = pirq_from_irq(irq);
599
600 bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
601 BIND_PIRQ__WILL_SHARE : 0;
602 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
603 if (rc != 0) {
604 if (!probing_irq(irq))
605 pr_info("Failed to obtain physical IRQ %d\n", irq);
606 return 0;
607 }
608 evtchn = bind_pirq.port;
609
610 pirq_query_unmask(irq);
611
612 evtchn_to_irq[evtchn] = irq;
613 bind_evtchn_to_cpu(evtchn, 0);
614 info->evtchn = evtchn;
615
616out:
617 unmask_evtchn(evtchn);
618 eoi_pirq(irq_get_irq_data(irq));
619
620 return 0;
621}
622
623static unsigned int startup_pirq(struct irq_data *data)
624{
625 return __startup_pirq(data->irq);
626}
627
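/* Mask and close the event channel backing a pirq. */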
628static void shutdown_pirq(struct irq_data *data)
629{
630 struct evtchn_close close;
631 unsigned int irq = data->irq;
632 struct irq_info *info = info_for_irq(irq);
633 int evtchn = evtchn_from_irq(irq);
634
635 BUG_ON(info->type != IRQT_PIRQ);
636
637 if (!VALID_EVTCHN(evtchn))
638 return;
639
640 mask_evtchn(evtchn);
641
642 close.port = evtchn;
643 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
644 BUG();
645
646 bind_evtchn_to_cpu(evtchn, 0);
647 evtchn_to_irq[evtchn] = -1;
648 info->evtchn = 0;
649}
650
651static void enable_pirq(struct irq_data *data)
652{
653 startup_pirq(data);
654}
655
656static void disable_pirq(struct irq_data *data)
657{
658 disable_dynirq(data);
659}
660
661int xen_irq_from_gsi(unsigned gsi)
662{
663 struct irq_info *info;
664
665 list_for_each_entry(info, &xen_irq_list_head, list) {
666 if (info->type != IRQT_PIRQ)
667 continue;
668
669 if (info->u.pirq.gsi == gsi)
670 return info->irq;
671 }
672
673 return -1;
674}
675EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up.  Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
687int xen_bind_pirq_gsi_to_irq(unsigned gsi,
688 unsigned pirq, int shareable, char *name)
689{
690 int irq = -1;
691 struct physdev_irq irq_op;
692
693 mutex_lock(&irq_mapping_update_lock);
694
695 irq = xen_irq_from_gsi(gsi);
696 if (irq != -1) {
697 pr_info("%s: returning irq %d for gsi %u\n",
698 __func__, irq, gsi);
699 goto out;
700 }
701
702 irq = xen_allocate_irq_gsi(gsi);
703 if (irq < 0)
704 goto out;
705
706 irq_op.irq = irq;
707 irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
712 if (xen_initial_domain() &&
713 HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
714 xen_free_irq(irq);
715 irq = -ENOSPC;
716 goto out;
717 }
718
719 xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
720 shareable ? PIRQ_SHAREABLE : 0);
721
722 pirq_query_unmask(irq);

	/*
	 * We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Whether the underlying pirq additionally requires an explicit
	 * PHYSDEVOP_eoi is tracked separately via PIRQ_NEEDS_EOI (see
	 * pirq_query_unmask() above and eoi_pirq()).
	 */
738 if (shareable)
739 irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
740 handle_fasteoi_irq, name);
741 else
742 irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
743 handle_edge_irq, name);
744
745out:
746 mutex_unlock(&irq_mapping_update_lock);
747
748 return irq;
749}
750
751#ifdef CONFIG_PCI_MSI
752int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
753{
754 int rc;
755 struct physdev_get_free_pirq op_get_free_pirq;
756
757 op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
758 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
759
760 WARN_ONCE(rc == -ENOSYS,
761 "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
762
763 return rc ? -1 : op_get_free_pirq.pirq;
764}
765
766int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
767 int pirq, const char *name, domid_t domid)
768{
769 int irq, ret;
770
771 mutex_lock(&irq_mapping_update_lock);
772
773 irq = xen_allocate_irq_dynamic();
774 if (irq < 0)
775 goto out;
776
777 irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
778 name);
779
780 xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
781 ret = irq_set_msi_desc(irq, msidesc);
782 if (ret < 0)
783 goto error_irq;
784out:
785 mutex_unlock(&irq_mapping_update_lock);
786 return irq;
787error_irq:
788 mutex_unlock(&irq_mapping_update_lock);
789 xen_free_irq(irq);
790 return ret;
791}
792#endif
793
794int xen_destroy_irq(int irq)
795{
796 struct irq_desc *desc;
797 struct physdev_unmap_pirq unmap_irq;
798 struct irq_info *info = info_for_irq(irq);
799 int rc = -ENOENT;
800
801 mutex_lock(&irq_mapping_update_lock);
802
803 desc = irq_to_desc(irq);
804 if (!desc)
805 goto out;
806
807 if (xen_initial_domain()) {
808 unmap_irq.pirq = info->u.pirq.pirq;
809 unmap_irq.domid = info->u.pirq.domid;
810 rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/*
		 * If the pirq was mapped on behalf of another domain and
		 * that domain no longer has it, the unmap returns -ESRCH;
		 * report it but do not treat it as an error.
		 */
815 if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
816 pr_info("domain %d does not have %d anymore\n",
817 info->u.pirq.domid, info->u.pirq.pirq);
818 else if (rc) {
819 pr_warn("unmap irq failed %d\n", rc);
820 goto out;
821 }
822 }
823
824 xen_free_irq(irq);
825
826out:
827 mutex_unlock(&irq_mapping_update_lock);
828 return rc;
829}
830
831int xen_irq_from_pirq(unsigned pirq)
832{
833 int irq;
834
835 struct irq_info *info;
836
837 mutex_lock(&irq_mapping_update_lock);
838
839 list_for_each_entry(info, &xen_irq_list_head, list) {
840 if (info->type != IRQT_PIRQ)
841 continue;
842 irq = info->irq;
843 if (info->u.pirq.pirq == pirq)
844 goto out;
845 }
846 irq = -1;
847out:
848 mutex_unlock(&irq_mapping_update_lock);
849
850 return irq;
851}
852
853
854int xen_pirq_from_irq(unsigned irq)
855{
856 return pirq_from_irq(irq);
857}
858EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
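
/*
 * Bind an (inter-domain) event channel to a dynamically allocated irq,
 * or return the irq it is already bound to.
 */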
859int bind_evtchn_to_irq(unsigned int evtchn)
860{
861 int irq;
862
863 mutex_lock(&irq_mapping_update_lock);
864
865 irq = evtchn_to_irq[evtchn];
866
867 if (irq == -1) {
868 irq = xen_allocate_irq_dynamic();
869 if (irq < 0)
870 goto out;
871
872 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
873 handle_edge_irq, "event");
874
875 xen_irq_info_evtchn_init(irq, evtchn);
876 } else {
877 struct irq_info *info = info_for_irq(irq);
878 WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
879 }
880
881out:
882 mutex_unlock(&irq_mapping_update_lock);
883
884 return irq;
885}
886EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
887
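/* Bind the per-cpu IPI @ipi on @cpu to an event channel and irq. */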
888static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
889{
890 struct evtchn_bind_ipi bind_ipi;
891 int evtchn, irq;
892
893 mutex_lock(&irq_mapping_update_lock);
894
895 irq = per_cpu(ipi_to_irq, cpu)[ipi];
896
897 if (irq == -1) {
898 irq = xen_allocate_irq_dynamic();
899 if (irq < 0)
900 goto out;
901
902 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
903 handle_percpu_irq, "ipi");
904
905 bind_ipi.vcpu = xen_vcpu_nr(cpu);
906 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
907 &bind_ipi) != 0)
908 BUG();
909 evtchn = bind_ipi.port;
910
911 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
912
913 bind_evtchn_to_cpu(evtchn, cpu);
914 } else {
915 struct irq_info *info = info_for_irq(irq);
916 WARN_ON(info == NULL || info->type != IRQT_IPI);
917 }
918
919 out:
920 mutex_unlock(&irq_mapping_update_lock);
921 return irq;
922}
923
924static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
925 unsigned int remote_port)
926{
927 struct evtchn_bind_interdomain bind_interdomain;
928 int err;
929
930 bind_interdomain.remote_dom = remote_domain;
931 bind_interdomain.remote_port = remote_port;
932
933 err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
934 &bind_interdomain);
935
936 return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
937}
938
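/*
 * Find the event channel that is already bound to @virq on @cpu by
 * querying the status of every port.  Used when EVTCHNOP_bind_virq
 * reports the VIRQ is already bound (-EEXIST).
 */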
939static int find_virq(unsigned int virq, unsigned int cpu)
940{
941 struct evtchn_status status;
942 int port, rc = -ENOENT;
943
944 memset(&status, 0, sizeof(status));
	for (port = 0; port < NR_EVENT_CHANNELS; port++) {
946 status.dom = DOMID_SELF;
947 status.port = port;
948 rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
949 if (rc < 0)
950 continue;
951 if (status.status != EVTCHNSTAT_virq)
952 continue;
953 if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
954 rc = port;
955 break;
956 }
957 }
958 return rc;
959}
960
961int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
962{
963 struct evtchn_bind_virq bind_virq;
964 int evtchn, irq, ret;
965
966 mutex_lock(&irq_mapping_update_lock);
967
968 irq = per_cpu(virq_to_irq, cpu)[virq];
969
970 if (irq == -1) {
971 irq = xen_allocate_irq_dynamic();
972 if (irq < 0)
973 goto out;
974
975 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
976 handle_percpu_irq, "virq");
977
978 bind_virq.virq = virq;
979 bind_virq.vcpu = xen_vcpu_nr(cpu);
980 ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
981 &bind_virq);
982 if (ret == 0)
983 evtchn = bind_virq.port;
984 else {
985 if (ret == -EEXIST)
986 ret = find_virq(virq, cpu);
987 BUG_ON(ret < 0);
988 evtchn = ret;
989 }
990
991 xen_irq_info_virq_init(cpu, irq, evtchn, virq);
992
993 bind_evtchn_to_cpu(evtchn, cpu);
994 } else {
995 struct irq_info *info = info_for_irq(irq);
996 WARN_ON(info == NULL || info->type != IRQT_VIRQ);
997 }
998
999out:
1000 mutex_unlock(&irq_mapping_update_lock);
1001
1002 return irq;
1003}
1004
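/*
 * Drop a reference on @irq; when the last reference goes away, close the
 * event channel, clear the mapping tables and free the irq.
 */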
1005static void unbind_from_irq(unsigned int irq)
1006{
1007 struct evtchn_close close;
1008 int evtchn = evtchn_from_irq(irq);
1009 struct irq_info *info = irq_get_handler_data(irq);
1010
1011 if (WARN_ON(!info))
1012 return;
1013
1014 mutex_lock(&irq_mapping_update_lock);
1015
1016 if (info->refcnt > 0) {
1017 info->refcnt--;
1018 if (info->refcnt != 0)
1019 goto done;
1020 }
1021
1022 if (VALID_EVTCHN(evtchn)) {
1023 close.port = evtchn;
1024 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
1025 BUG();
1026
1027 switch (type_from_irq(irq)) {
1028 case IRQT_VIRQ:
1029 per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
1030 [virq_from_irq(irq)] = -1;
1031 break;
1032 case IRQT_IPI:
1033 per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
1034 [ipi_from_irq(irq)] = -1;
1035 break;
1036 default:
1037 break;
1038 }
1039
1040
1041 bind_evtchn_to_cpu(evtchn, 0);
1042
1043 evtchn_to_irq[evtchn] = -1;
1044 }
1045
1046 BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
1047
1048 xen_free_irq(irq);
1049
1050 done:
1051 mutex_unlock(&irq_mapping_update_lock);
1052}
1053
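/*
 * Bind an event channel to an irq and install @handler for it.  Returns
 * the irq on success or a negative errno.
 */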
1054int bind_evtchn_to_irqhandler(unsigned int evtchn,
1055 irq_handler_t handler,
1056 unsigned long irqflags,
1057 const char *devname, void *dev_id)
1058{
1059 int irq, retval;
1060
1061 irq = bind_evtchn_to_irq(evtchn);
1062 if (irq < 0)
1063 return irq;
1064 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1065 if (retval != 0) {
1066 unbind_from_irq(irq);
1067 return retval;
1068 }
1069
1070 return irq;
1071}
1072EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
1073
1074int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
1075 unsigned int remote_port,
1076 irq_handler_t handler,
1077 unsigned long irqflags,
1078 const char *devname,
1079 void *dev_id)
1080{
1081 int irq, retval;
1082
1083 irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
1084 if (irq < 0)
1085 return irq;
1086
1087 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1088 if (retval != 0) {
1089 unbind_from_irq(irq);
1090 return retval;
1091 }
1092
1093 return irq;
1094}
1095EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
1096
1097int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
1098 irq_handler_t handler,
1099 unsigned long irqflags, const char *devname, void *dev_id)
1100{
1101 int irq, retval;
1102
1103 irq = bind_virq_to_irq(virq, cpu);
1104 if (irq < 0)
1105 return irq;
1106 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1107 if (retval != 0) {
1108 unbind_from_irq(irq);
1109 return retval;
1110 }
1111
1112 return irq;
1113}
1114EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
1115
1116int bind_ipi_to_irqhandler(enum ipi_vector ipi,
1117 unsigned int cpu,
1118 irq_handler_t handler,
1119 unsigned long irqflags,
1120 const char *devname,
1121 void *dev_id)
1122{
1123 int irq, retval;
1124
1125 irq = bind_ipi_to_irq(ipi, cpu);
1126 if (irq < 0)
1127 return irq;
1128
1129 irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
1130 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1131 if (retval != 0) {
1132 unbind_from_irq(irq);
1133 return retval;
1134 }
1135
1136 return irq;
1137}
1138
1139void unbind_from_irqhandler(unsigned int irq, void *dev_id)
1140{
1141 struct irq_info *info = irq_get_handler_data(irq);
1142
1143 if (WARN_ON(!info))
1144 return;
1145 free_irq(irq, dev_id);
1146 unbind_from_irq(irq);
1147}
1148EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
1149
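/*
 * Switch the irq bound to @evtchn over to reference counting, so that
 * evtchn_get()/evtchn_put() can be used to pin and release it.
 */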
1150int evtchn_make_refcounted(unsigned int evtchn)
1151{
1152 int irq = evtchn_to_irq[evtchn];
1153 struct irq_info *info;
1154
1155 if (irq == -1)
1156 return -ENOENT;
1157
1158 info = irq_get_handler_data(irq);
1159
1160 if (!info)
1161 return -ENOENT;
1162
1163 WARN_ON(info->refcnt != -1);
1164
1165 info->refcnt = 1;
1166
1167 return 0;
1168}
1169EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
1170
1171int evtchn_get(unsigned int evtchn)
1172{
1173 int irq;
1174 struct irq_info *info;
1175 int err = -ENOENT;
1176
1177 if (evtchn >= NR_EVENT_CHANNELS)
1178 return -EINVAL;
1179
1180 mutex_lock(&irq_mapping_update_lock);
1181
1182 irq = evtchn_to_irq[evtchn];
1183 if (irq == -1)
1184 goto done;
1185
1186 info = irq_get_handler_data(irq);
1187
1188 if (!info)
1189 goto done;
1190
1191 err = -EINVAL;
1192 if (info->refcnt <= 0)
1193 goto done;
1194
1195 info->refcnt++;
1196 err = 0;
1197 done:
1198 mutex_unlock(&irq_mapping_update_lock);
1199
1200 return err;
1201}
1202EXPORT_SYMBOL_GPL(evtchn_get);
1203
1204void evtchn_put(unsigned int evtchn)
1205{
1206 int irq = evtchn_to_irq[evtchn];
1207 if (WARN_ON(irq == -1))
1208 return;
1209 unbind_from_irq(irq);
1210}
1211EXPORT_SYMBOL_GPL(evtchn_put);
1212
1213void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
1214{
1215 int irq = per_cpu(ipi_to_irq, cpu)[vector];
1216 BUG_ON(irq < 0);
1217 notify_remote_via_irq(irq);
1218}
1219
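/*
 * Debug IRQ handler: dump the per-vcpu upcall state and the pending,
 * masked and per-cpu event-channel bitmaps.
 */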
1220irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
1221{
1222 struct shared_info *sh = HYPERVISOR_shared_info;
1223 int cpu = smp_processor_id();
1224 xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
1225 int i;
1226 unsigned long flags;
1227 static DEFINE_SPINLOCK(debug_lock);
1228 struct vcpu_info *v;
1229
1230 spin_lock_irqsave(&debug_lock, flags);
1231
1232 printk("\nvcpu %d\n ", cpu);
1233
1234 for_each_online_cpu(i) {
1235 int pending;
1236 v = per_cpu(xen_vcpu, i);
1237 pending = (get_irq_regs() && i == cpu)
1238 ? xen_irqs_disabled(get_irq_regs())
1239 : v->evtchn_upcall_mask;
1240 printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
1241 pending, v->evtchn_upcall_pending,
1242 (int)(sizeof(v->evtchn_pending_sel)*2),
1243 v->evtchn_pending_sel);
1244 }
1245 v = per_cpu(xen_vcpu, cpu);
1246
1247 printk("\npending:\n ");
1248 for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
1249 printk("%0*"PRI_xen_ulong"%s",
1250 (int)sizeof(sh->evtchn_pending[0])*2,
1251 sh->evtchn_pending[i],
1252 i % 8 == 0 ? "\n " : " ");
1253 printk("\nglobal mask:\n ");
1254 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
1255 printk("%0*"PRI_xen_ulong"%s",
1256 (int)(sizeof(sh->evtchn_mask[0])*2),
1257 sh->evtchn_mask[i],
1258 i % 8 == 0 ? "\n " : " ");
1259
1260 printk("\nglobally unmasked:\n ");
1261 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
1262 printk("%0*"PRI_xen_ulong"%s",
1263 (int)(sizeof(sh->evtchn_mask[0])*2),
1264 sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
1265 i % 8 == 0 ? "\n " : " ");
1266
1267 printk("\nlocal cpu%d mask:\n ", cpu);
1268 for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
1269 printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
1270 cpu_evtchn[i],
1271 i % 8 == 0 ? "\n " : " ");
1272
1273 printk("\nlocally unmasked:\n ");
1274 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
1275 xen_ulong_t pending = sh->evtchn_pending[i]
1276 & ~sh->evtchn_mask[i]
1277 & cpu_evtchn[i];
1278 printk("%0*"PRI_xen_ulong"%s",
1279 (int)(sizeof(sh->evtchn_mask[0])*2),
1280 pending, i % 8 == 0 ? "\n " : " ");
1281 }
1282
1283 printk("\npending list:\n");
1284 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
1285 if (sync_test_bit(i, BM(sh->evtchn_pending))) {
1286 int word_idx = i / BITS_PER_EVTCHN_WORD;
1287 printk(" %d: event %d -> irq %d%s%s%s\n",
1288 cpu_from_evtchn(i), i,
1289 evtchn_to_irq[i],
1290 sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
1291 ? "" : " l2-clear",
1292 !sync_test_bit(i, BM(sh->evtchn_mask))
1293 ? "" : " globally-masked",
1294 sync_test_bit(i, BM(cpu_evtchn))
1295 ? "" : " locally-masked");
1296 }
1297 }
1298
1299 spin_unlock_irqrestore(&debug_lock, flags);
1300
1301 return IRQ_HANDLED;
1302}
1303
1304static DEFINE_PER_CPU(unsigned, xed_nesting_count);
1305static DEFINE_PER_CPU(unsigned int, current_word_idx);
1306static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w.
 */
1311#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into the interrupt handling
 * path.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
1322static void __xen_evtchn_do_upcall(void)
1323{
1324 int start_word_idx, start_bit_idx;
1325 int word_idx, bit_idx;
1326 int i, irq;
1327 int cpu = get_cpu();
1328 struct shared_info *s = HYPERVISOR_shared_info;
1329 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
1330 unsigned count;
1331
1332 do {
1333 xen_ulong_t pending_words;
1334 xen_ulong_t pending_bits;
1335 struct irq_desc *desc;
1336
1337 vcpu_info->evtchn_upcall_pending = 0;
1338
1339 if (__this_cpu_inc_return(xed_nesting_count) - 1)
1340 goto out;

		/*
		 * Handle timer interrupts before all others, so that all
		 * hardirq handlers see an up-to-date system time even if we
		 * have just woken from a long idle period.
		 */
1347 if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
1348 int evtchn = evtchn_from_irq(irq);
			word_idx = evtchn / BITS_PER_EVTCHN_WORD;
			pending_bits = evtchn % BITS_PER_EVTCHN_WORD;
			if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
1352 desc = irq_to_desc(irq);
1353 if (desc)
1354 generic_handle_irq_desc(irq, desc);
1355 }
1356 }
1357
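		/*
		 * The master upcall flag must be cleared /before/ clearing
		 * the selector flag; xchg_xen_ulong provides the necessary
		 * barrier.
		 */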
1358 pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
1359
1360 start_word_idx = __this_cpu_read(current_word_idx);
1361 start_bit_idx = __this_cpu_read(current_bit_idx);
1362
1363 word_idx = start_word_idx;
1364
1365 for (i = 0; pending_words != 0; i++) {
1366 xen_ulong_t words;
1367
1368 words = MASK_LSBS(pending_words, word_idx);

			/*
			 * If we masked out all events, wrap to beginning.
			 */
1373 if (words == 0) {
1374 word_idx = 0;
1375 bit_idx = 0;
1376 continue;
1377 }
1378 word_idx = EVTCHN_FIRST_BIT(words);
1379
1380 pending_bits = active_evtchns(cpu, s, word_idx);
1381 bit_idx = 0;
			if (word_idx == start_word_idx) {
				/* We scan the starting word in two parts */
				if (i == 0)
					/* 1st time: start in the middle */
					bit_idx = start_bit_idx;
				else
					/* 2nd time: mask bits done already */
					bit_idx &= (1UL << start_bit_idx) - 1;
			}
1391
1392 do {
1393 xen_ulong_t bits;
1394 int port;
1395
1396 bits = MASK_LSBS(pending_bits, bit_idx);

				/* If we masked out all events, move on. */
1399 if (bits == 0)
1400 break;
1401
1402 bit_idx = EVTCHN_FIRST_BIT(bits);

				/* Map the port to an irq and dispatch it. */
1405 port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
1406 irq = evtchn_to_irq[port];
1407
1408 if (irq != -1) {
1409 desc = irq_to_desc(irq);
1410 if (desc)
1411 generic_handle_irq_desc(irq, desc);
1412 }
1413
1414 bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;

				/* Next caller starts at last processed + 1 */
1417 __this_cpu_write(current_word_idx,
1418 bit_idx ? word_idx :
1419 (word_idx+1) % BITS_PER_EVTCHN_WORD);
1420 __this_cpu_write(current_bit_idx, bit_idx);
1421 } while (bit_idx != 0);

			/* Scan the starting word twice; all others once. */
1424 if ((word_idx != start_word_idx) || (i != 0))
1425 pending_words &= ~(1UL << word_idx);
1426
1427 word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
1428 }
1429
1430 BUG_ON(!irqs_disabled());
1431
1432 count = __this_cpu_read(xed_nesting_count);
1433 __this_cpu_write(xed_nesting_count, 0);
1434 } while (count != 1 || vcpu_info->evtchn_upcall_pending);
1435
1436out:
1437
1438 put_cpu();
1439}
1440
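/*
 * Upcall entry point: called with the interrupted register state when
 * the hypervisor raises the event-channel callback on this vcpu.
 */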
1441void xen_evtchn_do_upcall(struct pt_regs *regs)
1442{
1443 struct pt_regs *old_regs = set_irq_regs(regs);
1444
1445 irq_enter();
1446#ifdef CONFIG_X86
1447 exit_idle();
1448#endif
1449 rh_inc_irq_stat(irq_hv_callback_count);
1450
1451 __xen_evtchn_do_upcall();
1452
1453 irq_exit();
1454 set_irq_regs(old_regs);
1455}
1456
1457void xen_hvm_evtchn_do_upcall(void)
1458{
1459 __xen_evtchn_do_upcall();
1460}
1461EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
1464void rebind_evtchn_irq(int evtchn, int irq)
1465{
1466 struct irq_info *info = info_for_irq(irq);
1467
1468 if (WARN_ON(!info))
1469 return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
1473 disable_irq(irq);
1474
1475 mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out. */
1478 BUG_ON(evtchn_to_irq[evtchn] != -1);

	/* Expect irq to have been bound before,
	   so there should be a proper type. */
1481 BUG_ON(info->type == IRQT_UNBOUND);
1482
1483 xen_irq_info_evtchn_init(irq, evtchn);
1484
1485 mutex_unlock(&irq_mapping_update_lock);

	/* New event channels are always bound to cpu 0. */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
1491 enable_irq(irq);
1492}

/* Rebind an evtchn so that it gets delivered to a specific cpu. */
1495static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1496{
1497 struct shared_info *s = HYPERVISOR_shared_info;
1498 struct evtchn_bind_vcpu bind_vcpu;
1499 int evtchn = evtchn_from_irq(irq);
1500 int masked;
1501
1502 if (!VALID_EVTCHN(evtchn))
1503 return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
1509 if (xen_hvm_domain() && !xen_have_vector_callback)
1510 return -1;

	/* Send future instances of this interrupt to the other vcpu. */
1513 bind_vcpu.port = evtchn;
1514 bind_vcpu.vcpu = xen_vcpu_nr(tcpu);

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
1520 masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));

	/*
	 * If this fails, it usually just indicates that we're dealing with
	 * a virq or IPI channel, which don't actually need to be rebound.
	 * Ignore it, but don't do the xenlinux-level rebind in that case.
	 */
1527 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1528 bind_evtchn_to_cpu(evtchn, tcpu);
1529
1530 if (!masked)
1531 unmask_evtchn(evtchn);
1532
1533 return 0;
1534}
1535
1536static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1537 bool force)
1538{
1539 unsigned tcpu = cpumask_first(dest);
1540
1541 return rebind_irq_to_cpu(data->irq, tcpu);
1542}
1543
1544int resend_irq_on_evtchn(unsigned int irq)
1545{
1546 int masked, evtchn = evtchn_from_irq(irq);
1547 struct shared_info *s = HYPERVISOR_shared_info;
1548
1549 if (!VALID_EVTCHN(evtchn))
1550 return 1;
1551
1552 masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
1553 sync_set_bit(evtchn, BM(s->evtchn_pending));
1554 if (!masked)
1555 unmask_evtchn(evtchn);
1556
1557 return 1;
1558}
1559
1560static void enable_dynirq(struct irq_data *data)
1561{
1562 int evtchn = evtchn_from_irq(data->irq);
1563
1564 if (VALID_EVTCHN(evtchn))
1565 unmask_evtchn(evtchn);
1566}
1567
1568static void disable_dynirq(struct irq_data *data)
1569{
1570 int evtchn = evtchn_from_irq(data->irq);
1571
1572 if (VALID_EVTCHN(evtchn))
1573 mask_evtchn(evtchn);
1574}
1575
1576static void ack_dynirq(struct irq_data *data)
1577{
1578 int evtchn = evtchn_from_irq(data->irq);
1579
1580 irq_move_irq(data);
1581
1582 if (VALID_EVTCHN(evtchn))
1583 clear_evtchn(evtchn);
1584}
1585
1586static void mask_ack_dynirq(struct irq_data *data)
1587{
1588 disable_dynirq(data);
1589 ack_dynirq(data);
1590}
1591
1592static int retrigger_dynirq(struct irq_data *data)
1593{
1594 int evtchn = evtchn_from_irq(data->irq);
1595 struct shared_info *sh = HYPERVISOR_shared_info;
1596 int ret = 0;
1597
1598 if (VALID_EVTCHN(evtchn)) {
1599 int masked;
1600
1601 masked = sync_test_and_set_bit(evtchn, BM(sh->evtchn_mask));
1602 sync_set_bit(evtchn, BM(sh->evtchn_pending));
1603 if (!masked)
1604 unmask_evtchn(evtchn);
1605 ret = 1;
1606 }
1607
1608 return ret;
1609}
1610
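/* Re-map and restart all GSI-backed pirqs after a resume. */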
1611static void restore_pirqs(void)
1612{
1613 int pirq, rc, irq, gsi;
1614 struct physdev_map_pirq map_irq;
1615 struct irq_info *info;
1616
1617 list_for_each_entry(info, &xen_irq_list_head, list) {
1618 if (info->type != IRQT_PIRQ)
1619 continue;
1620
1621 pirq = info->u.pirq.pirq;
1622 gsi = info->u.pirq.gsi;
1623 irq = info->irq;
1624
1625
1626
1627 if (!gsi)
1628 continue;
1629
1630 map_irq.domid = DOMID_SELF;
1631 map_irq.type = MAP_PIRQ_TYPE_GSI;
1632 map_irq.index = gsi;
1633 map_irq.pirq = pirq;
1634
1635 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1636 if (rc) {
1637 pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1638 gsi, irq, pirq, rc);
1639 xen_free_irq(irq);
1640 continue;
1641 }
1642
1643 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1644
1645 __startup_pirq(irq);
1646 }
1647}
1648
1649static void restore_cpu_virqs(unsigned int cpu)
1650{
1651 struct evtchn_bind_virq bind_virq;
1652 int virq, irq, evtchn;
1653
1654 for (virq = 0; virq < NR_VIRQS; virq++) {
1655 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
1656 continue;
1657
1658 BUG_ON(virq_from_irq(irq) != virq);
1659
1660
1661 bind_virq.virq = virq;
1662 bind_virq.vcpu = xen_vcpu_nr(cpu);
1663 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1664 &bind_virq) != 0)
1665 BUG();
1666 evtchn = bind_virq.port;
1667
1668
1669 xen_irq_info_virq_init(cpu, irq, evtchn, virq);
1670 bind_evtchn_to_cpu(evtchn, cpu);
1671 }
1672}
1673
1674static void restore_cpu_ipis(unsigned int cpu)
1675{
1676 struct evtchn_bind_ipi bind_ipi;
1677 int ipi, irq, evtchn;
1678
1679 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
1680 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
1681 continue;
1682
1683 BUG_ON(ipi_from_irq(irq) != ipi);
1684
1685
1686 bind_ipi.vcpu = xen_vcpu_nr(cpu);
1687 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1688 &bind_ipi) != 0)
1689 BUG();
1690 evtchn = bind_ipi.port;
1691
1692
1693 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
1694 bind_evtchn_to_cpu(evtchn, cpu);
1695 }
1696}

/* Clear an irq's pending state, in preparation for polling on it. */
1699void xen_clear_irq_pending(int irq)
1700{
1701 int evtchn = evtchn_from_irq(irq);
1702
1703 if (VALID_EVTCHN(evtchn))
1704 clear_evtchn(evtchn);
1705}
1706EXPORT_SYMBOL(xen_clear_irq_pending);
1707void xen_set_irq_pending(int irq)
1708{
1709 int evtchn = evtchn_from_irq(irq);
1710
1711 if (VALID_EVTCHN(evtchn))
1712 set_evtchn(evtchn);
1713}
1714
1715bool xen_test_irq_pending(int irq)
1716{
1717 int evtchn = evtchn_from_irq(irq);
1718 bool ret = false;
1719
1720 if (VALID_EVTCHN(evtchn))
1721 ret = test_evtchn(evtchn);
1722
1723 return ret;
1724}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
1728void xen_poll_irq_timeout(int irq, u64 timeout)
1729{
1730 evtchn_port_t evtchn = evtchn_from_irq(irq);
1731
1732 if (VALID_EVTCHN(evtchn)) {
1733 struct sched_poll poll;
1734
1735 poll.nr_ports = 1;
1736 poll.timeout = timeout;
1737 set_xen_guest_handle(poll.ports, &evtchn);
1738
1739 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
1740 BUG();
1741 }
1742}
1743EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

/* Check whether the IRQ line is shared with other guests. */
1752int xen_test_irq_shared(int irq)
1753{
1754 struct irq_info *info = info_for_irq(irq);
1755 struct physdev_irq_status_query irq_status;
1756
1757 if (WARN_ON(!info))
1758 return -ENOENT;
1759
1760 irq_status.irq = info->u.pirq.pirq;
1761
1762 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
1763 return 0;
1764 return !(irq_status.flags & XENIRQSTAT_shared);
1765}
1766EXPORT_SYMBOL_GPL(xen_test_irq_shared);
1767
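/*
 * Rebuild all event-channel state after a save/restore or migration:
 * the old event-channel numbers are gone, so every VIRQ, IPI and PIRQ
 * binding has to be re-established.
 */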
1768void xen_irq_resume(void)
1769{
1770 unsigned int cpu, evtchn;
1771 struct irq_info *info;
1772
1773 init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;
1785
1786 for_each_possible_cpu(cpu) {
1787 restore_cpu_virqs(cpu);
1788 restore_cpu_ipis(cpu);
1789 }
1790
1791 restore_pirqs();
1792}
1793
1794static struct irq_chip xen_dynamic_chip __read_mostly = {
1795 .name = "xen-dyn",
1796
1797 .irq_disable = disable_dynirq,
1798 .irq_mask = disable_dynirq,
1799 .irq_unmask = enable_dynirq,
1800
1801 .irq_ack = ack_dynirq,
1802 .irq_mask_ack = mask_ack_dynirq,
1803
1804 .irq_set_affinity = set_affinity_irq,
1805 .irq_retrigger = retrigger_dynirq,
1806};
1807
1808static struct irq_chip xen_pirq_chip __read_mostly = {
1809 .name = "xen-pirq",
1810
1811 .irq_startup = startup_pirq,
1812 .irq_shutdown = shutdown_pirq,
1813 .irq_enable = enable_pirq,
1814 .irq_disable = disable_pirq,
1815
1816 .irq_mask = disable_dynirq,
1817 .irq_unmask = enable_dynirq,
1818
1819 .irq_ack = eoi_pirq,
1820 .irq_eoi = eoi_pirq,
1821 .irq_mask_ack = mask_ack_pirq,
1822
1823 .irq_set_affinity = set_affinity_irq,
1824
1825 .irq_retrigger = retrigger_dynirq,
1826};
1827
1828static struct irq_chip xen_percpu_chip __read_mostly = {
1829 .name = "xen-percpu",
1830
1831 .irq_disable = disable_dynirq,
1832 .irq_mask = disable_dynirq,
1833 .irq_unmask = enable_dynirq,
1834
1835 .irq_ack = ack_dynirq,
1836};
1837
1838int xen_set_callback_via(uint64_t via)
1839{
1840 struct xen_hvm_param a;
1841 a.domid = DOMID_SELF;
1842 a.index = HVM_PARAM_CALLBACK_IRQ;
1843 a.value = via;
1844 return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
1845}
1846EXPORT_SYMBOL_GPL(xen_set_callback_via);
1847
1848#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
1852void xen_callback_vector(void)
1853{
1854 int rc;
1855 uint64_t callback_via;
1856 if (xen_have_vector_callback) {
1857 callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
1858 rc = xen_set_callback_via(callback_via);
1859 if (rc) {
1860 pr_err("Request for Xen HVM callback vector failed\n");
1861 xen_have_vector_callback = 0;
1862 return;
1863 }
1864 pr_info("Xen HVM callback vector for event delivery is enabled\n");
		/* In the restore case the vector has already been allocated. */
1866 if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
1867 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
1868 xen_hvm_callback_vector);
1869 }
1870}
1871#else
1872void xen_callback_vector(void) {}
1873#endif
1874
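/*
 * Early init: allocate the evtchn -> irq table, mask every event channel
 * and set up the platform-specific delivery path (callback vector for HVM
 * guests, the pirq EOI map for PV and dom0).
 */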
1875void __init xen_init_IRQ(void)
1876{
1877 int i;
1878
1879 evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
1880 GFP_KERNEL);
1881 BUG_ON(!evtchn_to_irq);
1882 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1883 evtchn_to_irq[i] = -1;
1884
1885 init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
1888 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1889 mask_evtchn(i);
1890
1891 pirq_needs_eoi = pirq_needs_eoi_flag;
1892
1893#ifdef CONFIG_X86
1894 if (xen_hvm_domain()) {
1895 xen_callback_vector();
1896 native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so
		 * that __acpi_register_gsi can point at the right function. */
1899 pci_xen_hvm_init();
1900 } else {
1901 int rc;
1902 struct physdev_pirq_eoi_gmfn eoi_gmfn;
1903
1904 irq_ctx_init(smp_processor_id());
1905 if (xen_initial_domain())
1906 pci_xen_initial_domain();
1907
1908 pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
1909 eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
1910 rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
1911 if (rc != 0) {
1912 free_page((unsigned long) pirq_eoi_map);
1913 pirq_eoi_map = NULL;
1914 } else
1915 pirq_needs_eoi = pirq_check_eoi_map;
1916 }
1917#endif
1918}
1919