linux/drivers/xen/events.c
   1/*
   2 * Xen event channels
   3 *
   4 * Xen models interrupts with abstract event channels.  Because each
   5 * domain gets 1024 event channels, but NR_IRQS is not that large, we
   6 * must dynamically map irqs<->event channels.  The event channels
   7 * interface with the rest of the kernel by defining a xen interrupt
   8 * chip.  When an event is received, it is mapped to an irq and sent
   9 * through the normal interrupt processing path.
  10 *
  11 * There are four kinds of events which can be mapped to an event
  12 * channel:
  13 *
  14 * 1. Inter-domain notifications.  This includes all the virtual
  15 *    device events, since they're driven by front-ends in another domain
  16 *    (typically dom0).
  17 * 2. VIRQs, typically used for timers.  These are per-cpu events.
  18 * 3. IPIs.
  19 * 4. Hardware interrupts. Not supported at present.
  20 *
  21 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
  22 */
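
/*
 * Illustrative sketch (not code used by this file): a frontend driver that
 * has negotiated an event channel with its backend typically hooks it up
 * through the interface below.  The names myfront_interrupt, "myfront",
 * dev and evtchn are hypothetical placeholders.
 *
 *      irq = bind_evtchn_to_irqhandler(evtchn, myfront_interrupt, 0,
 *                                      "myfront", dev);
 *      if (irq < 0)
 *              return irq;
 *      ...
 *      notify_remote_via_irq(irq);
 *      ...
 *      unbind_from_irqhandler(irq, dev);
 *
 * bind_evtchn_to_irqhandler() returns a negative errno if request_irq()
 * fails; notify_remote_via_irq() kicks the remote end and is safe to use
 * across save/restore; unbind_from_irqhandler() frees the irq and closes
 * the event channel.
 */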
  23
  24#include <linux/linkage.h>
  25#include <linux/interrupt.h>
  26#include <linux/irq.h>
  27#include <linux/module.h>
  28#include <linux/string.h>
  29#include <linux/bootmem.h>
  30
  31#include <asm/ptrace.h>
  32#include <asm/irq.h>
  33#include <asm/idle.h>
  34#include <asm/sync_bitops.h>
  35#include <asm/xen/hypercall.h>
  36#include <asm/xen/hypervisor.h>
  37
  38#include <xen/xen-ops.h>
  39#include <xen/events.h>
  40#include <xen/interface/xen.h>
  41#include <xen/interface/event_channel.h>
  42
  43/*
  44 * This lock protects updates to the following mapping and reference-count
  45 * arrays. The lock does not need to be acquired to read the mapping tables.
  46 */
  47static DEFINE_SPINLOCK(irq_mapping_update_lock);
  48
  49/* IRQ <-> VIRQ mapping. */
  50static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
  51
  52/* IRQ <-> IPI mapping */
  53static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
  54
  55/* Interrupt types. */
  56enum xen_irq_type {
  57        IRQT_UNBOUND = 0,
  58        IRQT_PIRQ,
  59        IRQT_VIRQ,
  60        IRQT_IPI,
  61        IRQT_EVTCHN
  62};
  63
  64/*
  65 * Packed IRQ information:
  66 * type - enum xen_irq_type
  67 * event channel - irq->event channel mapping
  68 * cpu - cpu this event channel is bound to
  69 * index - type-specific information:
  70 *    PIRQ - vector, with MSB being "needs EOI"
  71 *    VIRQ - virq number
  72 *    IPI - IPI vector
  73 *    EVTCHN -
  74 */
  75struct irq_info
  76{
  77        enum xen_irq_type type; /* type */
  78        unsigned short evtchn;  /* event channel */
  79        unsigned short cpu;     /* cpu bound */
  80
  81        union {
  82                unsigned short virq;
  83                enum ipi_vector ipi;
  84                struct {
  85                        unsigned short gsi;
  86                        unsigned short vector;
  87                } pirq;
  88        } u;
  89};
  90
  91static struct irq_info irq_info[NR_IRQS];
  92
  93static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
  94        [0 ... NR_EVENT_CHANNELS-1] = -1
  95};
  96struct cpu_evtchn_s {
  97        unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
  98};
  99static struct cpu_evtchn_s *cpu_evtchn_mask_p;
 100static inline unsigned long *cpu_evtchn_mask(int cpu)
 101{
 102        return cpu_evtchn_mask_p[cpu].bits;
 103}
 104
 105/* Xen will never allocate port zero for any purpose. */
 106#define VALID_EVTCHN(chn)       ((chn) != 0)
 107
 108static struct irq_chip xen_dynamic_chip;
 109
 110/* Constructor for packed IRQ information. */
 111static struct irq_info mk_unbound_info(void)
 112{
 113        return (struct irq_info) { .type = IRQT_UNBOUND };
 114}
 115
 116static struct irq_info mk_evtchn_info(unsigned short evtchn)
 117{
 118        return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
 119                        .cpu = 0 };
 120}
 121
 122static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
 123{
 124        return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
 125                        .cpu = 0, .u.ipi = ipi };
 126}
 127
 128static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
 129{
 130        return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
 131                        .cpu = 0, .u.virq = virq };
 132}
 133
 134static struct irq_info mk_pirq_info(unsigned short evtchn,
 135                                    unsigned short gsi, unsigned short vector)
 136{
 137        return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
 138                        .cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
 139}
 140
 141/*
 142 * Accessors for packed IRQ information.
 143 */
 144static struct irq_info *info_for_irq(unsigned irq)
 145{
 146        return &irq_info[irq];
 147}
 148
 149static unsigned int evtchn_from_irq(unsigned irq)
 150{
 151        return info_for_irq(irq)->evtchn;
 152}
 153
 154unsigned irq_from_evtchn(unsigned int evtchn)
 155{
 156        return evtchn_to_irq[evtchn];
 157}
 158EXPORT_SYMBOL_GPL(irq_from_evtchn);
 159
 160static enum ipi_vector ipi_from_irq(unsigned irq)
 161{
 162        struct irq_info *info = info_for_irq(irq);
 163
 164        BUG_ON(info == NULL);
 165        BUG_ON(info->type != IRQT_IPI);
 166
 167        return info->u.ipi;
 168}
 169
 170static unsigned virq_from_irq(unsigned irq)
 171{
 172        struct irq_info *info = info_for_irq(irq);
 173
 174        BUG_ON(info == NULL);
 175        BUG_ON(info->type != IRQT_VIRQ);
 176
 177        return info->u.virq;
 178}
 179
 180static unsigned gsi_from_irq(unsigned irq)
 181{
 182        struct irq_info *info = info_for_irq(irq);
 183
 184        BUG_ON(info == NULL);
 185        BUG_ON(info->type != IRQT_PIRQ);
 186
 187        return info->u.pirq.gsi;
 188}
 189
 190static unsigned vector_from_irq(unsigned irq)
 191{
 192        struct irq_info *info = info_for_irq(irq);
 193
 194        BUG_ON(info == NULL);
 195        BUG_ON(info->type != IRQT_PIRQ);
 196
 197        return info->u.pirq.vector;
 198}
 199
 200static enum xen_irq_type type_from_irq(unsigned irq)
 201{
 202        return info_for_irq(irq)->type;
 203}
 204
 205static unsigned cpu_from_irq(unsigned irq)
 206{
 207        return info_for_irq(irq)->cpu;
 208}
 209
 210static unsigned int cpu_from_evtchn(unsigned int evtchn)
 211{
 212        int irq = evtchn_to_irq[evtchn];
 213        unsigned ret = 0;
 214
 215        if (irq != -1)
 216                ret = cpu_from_irq(irq);
 217
 218        return ret;
 219}
 220
 221static inline unsigned long active_evtchns(unsigned int cpu,
 222                                           struct shared_info *sh,
 223                                           unsigned int idx)
 224{
 225        return (sh->evtchn_pending[idx] &
 226                cpu_evtchn_mask(cpu)[idx] &
 227                ~sh->evtchn_mask[idx]);
 228}
 229
 230static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 231{
 232        int irq = evtchn_to_irq[chn];
 233
 234        BUG_ON(irq == -1);
 235#ifdef CONFIG_SMP
 236        cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
 237#endif
 238
 239        __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
 240        __set_bit(chn, cpu_evtchn_mask(cpu));
 241
 242        irq_info[irq].cpu = cpu;
 243}
 244
 245static void init_evtchn_cpu_bindings(void)
 246{
 247#ifdef CONFIG_SMP
 248        struct irq_desc *desc;
 249        int i;
 250
 251        /* By default all event channels notify CPU#0. */
 252        for_each_irq_desc(i, desc) {
 253                cpumask_copy(desc->affinity, cpumask_of(0));
 254        }
 255#endif
 256
 257        memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
 258}
 259
 260static inline void clear_evtchn(int port)
 261{
 262        struct shared_info *s = HYPERVISOR_shared_info;
 263        sync_clear_bit(port, &s->evtchn_pending[0]);
 264}
 265
 266static inline void set_evtchn(int port)
 267{
 268        struct shared_info *s = HYPERVISOR_shared_info;
 269        sync_set_bit(port, &s->evtchn_pending[0]);
 270}
 271
 272static inline int test_evtchn(int port)
 273{
 274        struct shared_info *s = HYPERVISOR_shared_info;
 275        return sync_test_bit(port, &s->evtchn_pending[0]);
 276}
 277
 278
 279/**
 280 * notify_remote_via_irq - send event to remote end of event channel via irq
 281 * @irq: irq of event channel to send event to
 282 *
 283 * Unlike notify_remote_via_evtchn(), this is safe to use across
 284 * save/restore. Notifications on a broken connection are silently
 285 * dropped.
 286 */
 287void notify_remote_via_irq(int irq)
 288{
 289        int evtchn = evtchn_from_irq(irq);
 290
 291        if (VALID_EVTCHN(evtchn))
 292                notify_remote_via_evtchn(evtchn);
 293}
 294EXPORT_SYMBOL_GPL(notify_remote_via_irq);
 295
 296static void mask_evtchn(int port)
 297{
 298        struct shared_info *s = HYPERVISOR_shared_info;
 299        sync_set_bit(port, &s->evtchn_mask[0]);
 300}
 301
 302static void unmask_evtchn(int port)
 303{
 304        struct shared_info *s = HYPERVISOR_shared_info;
 305        unsigned int cpu = get_cpu();
 306
 307        BUG_ON(!irqs_disabled());
 308
 309        /* Slow path (hypercall) if this is a non-local port. */
 310        if (unlikely(cpu != cpu_from_evtchn(port))) {
 311                struct evtchn_unmask unmask = { .port = port };
 312                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
 313        } else {
 314                struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
 315
 316                sync_clear_bit(port, &s->evtchn_mask[0]);
 317
 318                /*
 319                 * The following is basically the equivalent of
 320                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
 321                 * the interrupt edge' if the channel is masked.
 322                 */
 323                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
 324                    !sync_test_and_set_bit(port / BITS_PER_LONG,
 325                                           &vcpu_info->evtchn_pending_sel))
 326                        vcpu_info->evtchn_upcall_pending = 1;
 327        }
 328
 329        put_cpu();
 330}
 331
 332static int find_unbound_irq(void)
 333{
 334        int irq;
 335        struct irq_desc *desc;
 336
 337        for (irq = 0; irq < nr_irqs; irq++)
 338                if (irq_info[irq].type == IRQT_UNBOUND)
 339                        break;
 340
 341        if (irq == nr_irqs)
 342                panic("No available IRQ to bind to: increase nr_irqs!\n");
 343
 344        desc = irq_to_desc_alloc_node(irq, 0);
 345        if (WARN_ON(desc == NULL))
 346                return -1;
 347
 348        dynamic_irq_init(irq);
 349
 350        return irq;
 351}
 352
 353int bind_evtchn_to_irq(unsigned int evtchn)
 354{
 355        int irq;
 356
 357        spin_lock(&irq_mapping_update_lock);
 358
 359        irq = evtchn_to_irq[evtchn];
 360
 361        if (irq == -1) {
 362                irq = find_unbound_irq();
 363
 364                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 365                                              handle_level_irq, "event");
 366
 367                evtchn_to_irq[evtchn] = irq;
 368                irq_info[irq] = mk_evtchn_info(evtchn);
 369        }
 370
 371        spin_unlock(&irq_mapping_update_lock);
 372
 373        return irq;
 374}
 375EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
 376
 377static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 378{
 379        struct evtchn_bind_ipi bind_ipi;
 380        int evtchn, irq;
 381
 382        spin_lock(&irq_mapping_update_lock);
 383
 384        irq = per_cpu(ipi_to_irq, cpu)[ipi];
 385
 386        if (irq == -1) {
 387                irq = find_unbound_irq();
 388                if (irq < 0)
 389                        goto out;
 390
 391                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 392                                              handle_level_irq, "ipi");
 393
 394                bind_ipi.vcpu = cpu;
 395                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
 396                                                &bind_ipi) != 0)
 397                        BUG();
 398                evtchn = bind_ipi.port;
 399
 400                evtchn_to_irq[evtchn] = irq;
 401                irq_info[irq] = mk_ipi_info(evtchn, ipi);
 402                per_cpu(ipi_to_irq, cpu)[ipi] = irq;
 403
 404                bind_evtchn_to_cpu(evtchn, cpu);
 405        }
 406
 407 out:
 408        spin_unlock(&irq_mapping_update_lock);
 409        return irq;
 410}
 411
 412
 413static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 414{
 415        struct evtchn_bind_virq bind_virq;
 416        int evtchn, irq;
 417
 418        spin_lock(&irq_mapping_update_lock);
 419
 420        irq = per_cpu(virq_to_irq, cpu)[virq];
 421
 422        if (irq == -1) {
 423                bind_virq.virq = virq;
 424                bind_virq.vcpu = cpu;
 425                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
 426                                                &bind_virq) != 0)
 427                        BUG();
 428                evtchn = bind_virq.port;
 429
 430                irq = find_unbound_irq();
 431
 432                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 433                                              handle_level_irq, "virq");
 434
 435                evtchn_to_irq[evtchn] = irq;
 436                irq_info[irq] = mk_virq_info(evtchn, virq);
 437
 438                per_cpu(virq_to_irq, cpu)[virq] = irq;
 439
 440                bind_evtchn_to_cpu(evtchn, cpu);
 441        }
 442
 443        spin_unlock(&irq_mapping_update_lock);
 444
 445        return irq;
 446}
 447
 448static void unbind_from_irq(unsigned int irq)
 449{
 450        struct evtchn_close close;
 451        int evtchn = evtchn_from_irq(irq);
 452
 453        spin_lock(&irq_mapping_update_lock);
 454
 455        if (VALID_EVTCHN(evtchn)) {
 456                close.port = evtchn;
 457                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
 458                        BUG();
 459
 460                switch (type_from_irq(irq)) {
 461                case IRQT_VIRQ:
 462                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
 463                                [virq_from_irq(irq)] = -1;
 464                        break;
 465                case IRQT_IPI:
 466                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
 467                                [ipi_from_irq(irq)] = -1;
 468                        break;
 469                default:
 470                        break;
 471                }
 472
 473                /* Closed ports are implicitly re-bound to VCPU0. */
 474                bind_evtchn_to_cpu(evtchn, 0);
 475
 476                evtchn_to_irq[evtchn] = -1;
 477                irq_info[irq] = mk_unbound_info();
 478
 479                dynamic_irq_cleanup(irq);
 480        }
 481
 482        spin_unlock(&irq_mapping_update_lock);
 483}
 484
 485int bind_evtchn_to_irqhandler(unsigned int evtchn,
 486                              irq_handler_t handler,
 487                              unsigned long irqflags,
 488                              const char *devname, void *dev_id)
 489{
 490        unsigned int irq;
 491        int retval;
 492
 493        irq = bind_evtchn_to_irq(evtchn);
 494        retval = request_irq(irq, handler, irqflags, devname, dev_id);
 495        if (retval != 0) {
 496                unbind_from_irq(irq);
 497                return retval;
 498        }
 499
 500        return irq;
 501}
 502EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
 503
 504int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
 505                            irq_handler_t handler,
 506                            unsigned long irqflags, const char *devname, void *dev_id)
 507{
 508        unsigned int irq;
 509        int retval;
 510
 511        irq = bind_virq_to_irq(virq, cpu);
 512        retval = request_irq(irq, handler, irqflags, devname, dev_id);
 513        if (retval != 0) {
 514                unbind_from_irq(irq);
 515                return retval;
 516        }
 517
 518        return irq;
 519}
 520EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
 521
 522int bind_ipi_to_irqhandler(enum ipi_vector ipi,
 523                           unsigned int cpu,
 524                           irq_handler_t handler,
 525                           unsigned long irqflags,
 526                           const char *devname,
 527                           void *dev_id)
 528{
 529        int irq, retval;
 530
 531        irq = bind_ipi_to_irq(ipi, cpu);
 532        if (irq < 0)
 533                return irq;
 534
 535        retval = request_irq(irq, handler, irqflags, devname, dev_id);
 536        if (retval != 0) {
 537                unbind_from_irq(irq);
 538                return retval;
 539        }
 540
 541        return irq;
 542}
 543
 544void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 545{
 546        free_irq(irq, dev_id);
 547        unbind_from_irq(irq);
 548}
 549EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
 550
 551void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
 552{
 553        int irq = per_cpu(ipi_to_irq, cpu)[vector];
 554        BUG_ON(irq < 0);
 555        notify_remote_via_irq(irq);
 556}
 557
 558irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 559{
 560        struct shared_info *sh = HYPERVISOR_shared_info;
 561        int cpu = smp_processor_id();
 562        int i;
 563        unsigned long flags;
 564        static DEFINE_SPINLOCK(debug_lock);
 565
 566        spin_lock_irqsave(&debug_lock, flags);
 567
 568        printk("vcpu %d\n  ", cpu);
 569
 570        for_each_online_cpu(i) {
 571                struct vcpu_info *v = per_cpu(xen_vcpu, i);
 572                printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
 573                        (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
 574                        v->evtchn_upcall_pending,
 575                        v->evtchn_pending_sel);
 576        }
 577        printk("pending:\n   ");
 578        for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
 579                printk("%08lx%s", sh->evtchn_pending[i],
 580                        i % 8 == 0 ? "\n   " : " ");
 581        printk("\nmasks:\n   ");
 582        for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
 583                printk("%08lx%s", sh->evtchn_mask[i],
 584                        i % 8 == 0 ? "\n   " : " ");
 585
 586        printk("\nunmasked:\n   ");
 587        for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
 588                printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
 589                        i % 8 == 0 ? "\n   " : " ");
 590
 591        printk("\npending list:\n");
 592        for(i = 0; i < NR_EVENT_CHANNELS; i++) {
 593                if (sync_test_bit(i, sh->evtchn_pending)) {
 594                        printk("  %d: event %d -> irq %d\n",
 595                               cpu_from_evtchn(i), i,
 596                               evtchn_to_irq[i]);
 597                }
 598        }
 599
 600        spin_unlock_irqrestore(&debug_lock, flags);
 601
 602        return IRQ_HANDLED;
 603}
 604
 605static DEFINE_PER_CPU(unsigned, xed_nesting_count);
 606
 607/*
 608 * Search the CPUs pending events bitmasks.  For each one found, map
 609 * the event number to an irq, and feed it into do_IRQ() for
 610 * handling.
 611 *
 612 * Xen uses a two-level bitmap to speed searching.  The first level is
 613 * a bitset of words which contain pending event bits.  The second
 614 * level is a bitset of pending events themselves.
 615 */
 616void xen_evtchn_do_upcall(struct pt_regs *regs)
 617{
 618        int cpu = get_cpu();
 619        struct pt_regs *old_regs = set_irq_regs(regs);
 620        struct shared_info *s = HYPERVISOR_shared_info;
 621        struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
 622        unsigned count;
 623
 624        exit_idle();
 625        irq_enter();
 626
 627        do {
 628                unsigned long pending_words;
 629
 630                vcpu_info->evtchn_upcall_pending = 0;
 631
 632                if (__get_cpu_var(xed_nesting_count)++)
 633                        goto out;
 634
 635#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
 636                /* Clear master flag /before/ clearing selector flag. */
 637                wmb();
 638#endif
 639                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
 640                while (pending_words != 0) {
 641                        unsigned long pending_bits;
 642                        int word_idx = __ffs(pending_words);
 643                        pending_words &= ~(1UL << word_idx);
 644
 645                        while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
 646                                int bit_idx = __ffs(pending_bits);
 647                                int port = (word_idx * BITS_PER_LONG) + bit_idx;
 648                                int irq = evtchn_to_irq[port];
 649
 650                                if (irq != -1)
 651                                        handle_irq(irq, regs);
 652                        }
 653                }
 654
 655                BUG_ON(!irqs_disabled());
 656
 657                count = __get_cpu_var(xed_nesting_count);
 658                __get_cpu_var(xed_nesting_count) = 0;
 659        } while(count != 1);
 660
 661out:
 662        irq_exit();
 663        set_irq_regs(old_regs);
 664
 665        put_cpu();
 666}
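
/*
 * Worked example for the two-level lookup in xen_evtchn_do_upcall() above
 * (illustrative only, assuming BITS_PER_LONG == 64): a pending port 131
 * sets bit 2 in evtchn_pending_sel and bit 3 in evtchn_pending[2], so the
 * loops find word_idx = 2 and bit_idx = 3 and reconstruct
 * port = word_idx * BITS_PER_LONG + bit_idx = 2 * 64 + 3 = 131.
 */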
 667
 668/* Rebind a new event channel to an existing irq. */
 669void rebind_evtchn_irq(int evtchn, int irq)
 670{
 671        struct irq_info *info = info_for_irq(irq);
 672
 673        /* Make sure the irq is masked, since the new event channel
 674           will also be masked. */
 675        disable_irq(irq);
 676
 677        spin_lock(&irq_mapping_update_lock);
 678
 679        /* After resume the irq<->evtchn mappings are all cleared out */
 680        BUG_ON(evtchn_to_irq[evtchn] != -1);
 681        /* Expect irq to have been bound before,
 682           so there should be a proper type */
 683        BUG_ON(info->type == IRQT_UNBOUND);
 684
 685        evtchn_to_irq[evtchn] = irq;
 686        irq_info[irq] = mk_evtchn_info(evtchn);
 687
 688        spin_unlock(&irq_mapping_update_lock);
 689
 690        /* new event channels are always bound to cpu 0 */
 691        irq_set_affinity(irq, cpumask_of(0));
 692
 693        /* Unmask the event channel. */
 694        enable_irq(irq);
 695}
 696
 697/* Rebind an evtchn so that it gets delivered to a specific cpu */
 698static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 699{
 700        struct evtchn_bind_vcpu bind_vcpu;
 701        int evtchn = evtchn_from_irq(irq);
 702
 703        if (!VALID_EVTCHN(evtchn))
 704                return -1;
 705
 706        /* Send future instances of this interrupt to other vcpu. */
 707        bind_vcpu.port = evtchn;
 708        bind_vcpu.vcpu = tcpu;
 709
 710        /*
 711         * If this fails, it usually just indicates that we're dealing with a
 712         * virq or IPI channel, which don't actually need to be rebound. Ignore
 713         * it, but don't do the xenlinux-level rebind in that case.
 714         */
 715        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
 716                bind_evtchn_to_cpu(evtchn, tcpu);
 717
 718        return 0;
 719}
 720
 721static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
 722{
 723        unsigned tcpu = cpumask_first(dest);
 724
 725        return rebind_irq_to_cpu(irq, tcpu);
 726}
 727
 728int resend_irq_on_evtchn(unsigned int irq)
 729{
 730        int masked, evtchn = evtchn_from_irq(irq);
 731        struct shared_info *s = HYPERVISOR_shared_info;
 732
 733        if (!VALID_EVTCHN(evtchn))
 734                return 1;
 735
 736        masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
 737        sync_set_bit(evtchn, s->evtchn_pending);
 738        if (!masked)
 739                unmask_evtchn(evtchn);
 740
 741        return 1;
 742}
 743
 744static void enable_dynirq(unsigned int irq)
 745{
 746        int evtchn = evtchn_from_irq(irq);
 747
 748        if (VALID_EVTCHN(evtchn))
 749                unmask_evtchn(evtchn);
 750}
 751
 752static void disable_dynirq(unsigned int irq)
 753{
 754        int evtchn = evtchn_from_irq(irq);
 755
 756        if (VALID_EVTCHN(evtchn))
 757                mask_evtchn(evtchn);
 758}
 759
 760static void ack_dynirq(unsigned int irq)
 761{
 762        int evtchn = evtchn_from_irq(irq);
 763
 764        move_native_irq(irq);
 765
 766        if (VALID_EVTCHN(evtchn))
 767                clear_evtchn(evtchn);
 768}
 769
 770static int retrigger_dynirq(unsigned int irq)
 771{
 772        int evtchn = evtchn_from_irq(irq);
 773        struct shared_info *sh = HYPERVISOR_shared_info;
 774        int ret = 0;
 775
 776        if (VALID_EVTCHN(evtchn)) {
 777                int masked;
 778
 779                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
 780                sync_set_bit(evtchn, sh->evtchn_pending);
 781                if (!masked)
 782                        unmask_evtchn(evtchn);
 783                ret = 1;
 784        }
 785
 786        return ret;
 787}
 788
 789static void restore_cpu_virqs(unsigned int cpu)
 790{
 791        struct evtchn_bind_virq bind_virq;
 792        int virq, irq, evtchn;
 793
 794        for (virq = 0; virq < NR_VIRQS; virq++) {
 795                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
 796                        continue;
 797
 798                BUG_ON(virq_from_irq(irq) != virq);
 799
 800                /* Get a new binding from Xen. */
 801                bind_virq.virq = virq;
 802                bind_virq.vcpu = cpu;
 803                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
 804                                                &bind_virq) != 0)
 805                        BUG();
 806                evtchn = bind_virq.port;
 807
 808                /* Record the new mapping. */
 809                evtchn_to_irq[evtchn] = irq;
 810                irq_info[irq] = mk_virq_info(evtchn, virq);
 811                bind_evtchn_to_cpu(evtchn, cpu);
 812
 813                /* Ready for use. */
 814                unmask_evtchn(evtchn);
 815        }
 816}
 817
 818static void restore_cpu_ipis(unsigned int cpu)
 819{
 820        struct evtchn_bind_ipi bind_ipi;
 821        int ipi, irq, evtchn;
 822
 823        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
 824                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
 825                        continue;
 826
 827                BUG_ON(ipi_from_irq(irq) != ipi);
 828
 829                /* Get a new binding from Xen. */
 830                bind_ipi.vcpu = cpu;
 831                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
 832                                                &bind_ipi) != 0)
 833                        BUG();
 834                evtchn = bind_ipi.port;
 835
 836                /* Record the new mapping. */
 837                evtchn_to_irq[evtchn] = irq;
 838                irq_info[irq] = mk_ipi_info(evtchn, ipi);
 839                bind_evtchn_to_cpu(evtchn, cpu);
 840
 841                /* Ready for use. */
 842                unmask_evtchn(evtchn);
 843
 844        }
 845}
 846
 847/* Clear an irq's pending state, in preparation for polling on it */
 848void xen_clear_irq_pending(int irq)
 849{
 850        int evtchn = evtchn_from_irq(irq);
 851
 852        if (VALID_EVTCHN(evtchn))
 853                clear_evtchn(evtchn);
 854}
 855
 856void xen_set_irq_pending(int irq)
 857{
 858        int evtchn = evtchn_from_irq(irq);
 859
 860        if (VALID_EVTCHN(evtchn))
 861                set_evtchn(evtchn);
 862}
 863
 864bool xen_test_irq_pending(int irq)
 865{
 866        int evtchn = evtchn_from_irq(irq);
 867        bool ret = false;
 868
 869        if (VALID_EVTCHN(evtchn))
 870                ret = test_evtchn(evtchn);
 871
 872        return ret;
 873}
 874
 875/* Poll waiting for an irq to become pending.  In the usual case, the
 876   irq will be disabled so it won't deliver an interrupt. */
 877void xen_poll_irq(int irq)
 878{
 879        evtchn_port_t evtchn = evtchn_from_irq(irq);
 880
 881        if (VALID_EVTCHN(evtchn)) {
 882                struct sched_poll poll;
 883
 884                poll.nr_ports = 1;
 885                poll.timeout = 0;
 886                set_xen_guest_handle(poll.ports, &evtchn);
 887
 888                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
 889                        BUG();
 890        }
 891}
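
/*
 * Illustrative use of the helpers above (a sketch, not code from this file;
 * condition_satisfied() is a hypothetical re-check): a caller that wants to
 * block until an event arrives on a normally-disabled irq can do
 *
 *      xen_clear_irq_pending(irq);
 *      if (!condition_satisfied())
 *              xen_poll_irq(irq);
 *
 * SCHEDOP_poll may return before the port is pending, so callers generally
 * re-test their condition (or xen_test_irq_pending()) in a loop.
 */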
 892
 893void xen_irq_resume(void)
 894{
 895        unsigned int cpu, irq, evtchn;
 896
 897        init_evtchn_cpu_bindings();
 898
 899        /* New event-channel space is not 'live' yet. */
 900        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
 901                mask_evtchn(evtchn);
 902
 903        /* No IRQ <-> event-channel mappings. */
 904        for (irq = 0; irq < nr_irqs; irq++)
 905                irq_info[irq].evtchn = 0; /* zap event-channel binding */
 906
 907        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
 908                evtchn_to_irq[evtchn] = -1;
 909
 910        for_each_possible_cpu(cpu) {
 911                restore_cpu_virqs(cpu);
 912                restore_cpu_ipis(cpu);
 913        }
 914}
 915
 916static struct irq_chip xen_dynamic_chip __read_mostly = {
 917        .name           = "xen-dyn",
 918
 919        .disable        = disable_dynirq,
 920        .mask           = disable_dynirq,
 921        .unmask         = enable_dynirq,
 922
 923        .ack            = ack_dynirq,
 924        .set_affinity   = set_affinity_irq,
 925        .retrigger      = retrigger_dynirq,
 926};
 927
 928void __init xen_init_IRQ(void)
 929{
 930        int i;
 931
 932        cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
 933                                    GFP_KERNEL);
 934        BUG_ON(cpu_evtchn_mask_p == NULL);
 935
 936        init_evtchn_cpu_bindings();
 937
 938        /* No event channels are 'live' right now. */
 939        for (i = 0; i < NR_EVENT_CHANNELS; i++)
 940                mask_evtchn(i);
 941
 942        irq_ctx_init(smp_processor_id());
 943}
 944