/******************************************************************************
 * arch/ia64/xen/irq_xen.c
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/callback.h>
#include <xen/events.h>

#include <asm/xen/privop.h>

#include "irq_xen.h"

/***************************************************************************
 * pv_irq_ops
 * irq operations
 */

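/*
 * Ask the hypervisor for a vector for @irq via the
 * PHYSDEVOP_alloc_irq_vector hypercall.  Returns the allocated vector,
 * or -ENOSPC if the hypercall fails.
 */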
static int
xen_assign_irq_vector(int irq)
{
        struct physdev_irq irq_op;

        irq_op.irq = irq;
        if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
                return -ENOSPC;

        return irq_op.vector;
}

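/*
 * Hand a vector back to the hypervisor.  Vectors outside the device
 * vector range were never allocated by xen_assign_irq_vector(), so
 * they are silently ignored.
 */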
static void
xen_free_irq_vector(int vector)
{
        struct physdev_irq irq_op;

        if (vector < IA64_FIRST_DEVICE_VECTOR ||
            vector > IA64_LAST_DEVICE_VECTOR)
                return;

        irq_op.vector = vector;
        if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
                printk(KERN_WARNING "%s: xen_free_irq_vector failed, vector=%d\n",
                       __func__, vector);
}


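/*
 * Per-cpu irq numbers returned by the evtchn binding calls below,
 * cached (along with the irq names) so the irqs can be unbound again
 * on CPU_DEAD.  -1 means "not bound".
 */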
static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
#define NAME_SIZE       15
static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
#undef NAME_SIZE

struct saved_irq {
        unsigned int irq;
        struct irqaction *action;
};
/* 16 should be more than enough, since only a handful of percpu irqs
 * are registered this early.
 */
#define MAX_LATE_IRQ    16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt;
static unsigned short saved_irq_cnt;
static int xen_slab_ready;

#ifdef CONFIG_SMP
/* Dummy stub.  We could check for XEN_RESCHEDULE_VECTOR before
 * __do_IRQ, but that would require several memory accesses to percpu
 * data and thus add unnecessary traffic to other paths.
 */
static irqreturn_t
xen_dummy_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static struct irqaction xen_ipi_irqaction = {
        .handler =      handle_IPI,
        .flags =        IRQF_DISABLED,
        .name =         "IPI"
};

static struct irqaction xen_resched_irqaction = {
        .handler =      xen_dummy_handler,
        .flags =        IRQF_DISABLED,
        .name =         "resched"
};

static struct irqaction xen_tlb_irqaction = {
        .handler =      xen_dummy_handler,
        .flags =        IRQF_DISABLED,
        .name =         "tlb_flush"
};
#endif

/*
 * This is the xen version of percpu irq registration, which binds to
 * the xen-specific evtchn sub-system.  One catch is that the xen
 * evtchn binding interface depends on kmalloc, because the associated
 * port must be freed when a device/cpu goes down.  So registrations
 * made on the BSP before the slab allocator is ready are cached and
 * replayed later; registrations made after slab is ready are hooked
 * to the xen evtchn immediately.
 *
 * FIXME: MCA is not supported yet, and thus the "nomca" boot param is
 * required.
 */
static void
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
                        struct irqaction *action, int save)
{
        struct irq_desc *desc;
        int irq = 0;

        if (xen_slab_ready) {
                switch (vec) {
                case IA64_TIMER_VECTOR:
                        snprintf(per_cpu(xen_timer_name, cpu),
                                 sizeof(per_cpu(xen_timer_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
                                action->handler, action->flags,
                                per_cpu(xen_timer_name, cpu), action->dev_id);
                        per_cpu(xen_timer_irq, cpu) = irq;
                        break;
                case IA64_IPI_RESCHEDULE:
                        snprintf(per_cpu(xen_resched_name, cpu),
                                 sizeof(per_cpu(xen_resched_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
                                action->handler, action->flags,
                                per_cpu(xen_resched_name, cpu), action->dev_id);
                        per_cpu(xen_resched_irq, cpu) = irq;
                        break;
                case IA64_IPI_VECTOR:
                        snprintf(per_cpu(xen_ipi_name, cpu),
                                 sizeof(per_cpu(xen_ipi_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
                                action->handler, action->flags,
                                per_cpu(xen_ipi_name, cpu), action->dev_id);
                        per_cpu(xen_ipi_irq, cpu) = irq;
                        break;
                case IA64_CMC_VECTOR:
                        snprintf(per_cpu(xen_cmc_name, cpu),
                                 sizeof(per_cpu(xen_cmc_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
                                                action->handler,
                                                action->flags,
                                                per_cpu(xen_cmc_name, cpu),
                                                action->dev_id);
                        per_cpu(xen_cmc_irq, cpu) = irq;
                        break;
                case IA64_CMCP_VECTOR:
                        snprintf(per_cpu(xen_cmcp_name, cpu),
                                 sizeof(per_cpu(xen_cmcp_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
                                                action->handler,
                                                action->flags,
                                                per_cpu(xen_cmcp_name, cpu),
                                                action->dev_id);
                        per_cpu(xen_cmcp_irq, cpu) = irq;
                        break;
                case IA64_CPEP_VECTOR:
                        snprintf(per_cpu(xen_cpep_name, cpu),
                                 sizeof(per_cpu(xen_cpep_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
                                                action->handler,
                                                action->flags,
                                                per_cpu(xen_cpep_name, cpu),
                                                action->dev_id);
                        per_cpu(xen_cpep_irq, cpu) = irq;
                        break;
                case IA64_CPE_VECTOR:
                case IA64_MCA_RENDEZ_VECTOR:
                case IA64_PERFMON_VECTOR:
                case IA64_MCA_WAKEUP_VECTOR:
                case IA64_SPURIOUS_INT_VECTOR:
                        /* No need to complain, these aren't supported. */
                        break;
                default:
                        printk(KERN_WARNING "Percpu irq %d is unsupported "
                               "by xen!\n", vec);
                        break;
                }
                BUG_ON(irq < 0);

                if (irq > 0) {
                        /*
                         * Mark percpu.  Without this, migrate_irqs() will
                         * mark the interrupt for migrations and trigger it
                         * on cpu hotplug.
                         */
                        desc = irq_desc + irq;
                        desc->status |= IRQ_PER_CPU;
                }
        }

        /* On the BSP, we cache registered percpu irqs, and then re-walk
         * them when initializing APs.
         */
        if (!cpu && save) {
                BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
                saved_percpu_irqs[saved_irq_cnt].irq = vec;
                saved_percpu_irqs[saved_irq_cnt].action = action;
                saved_irq_cnt++;
                if (!xen_slab_ready)
                        late_irq_cnt++;
        }
}

static void
xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
        __xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
}

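/*
 * Called via the late_time_init hook (see xen_irq_init()) once the
 * slab allocator is up: replay the percpu irq registrations that were
 * cached before slab was ready.
 */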
static void
xen_bind_early_percpu_irq(void)
{
        int i;

        xen_slab_ready = 1;
        /* There's no race when accessing this cached array, since only
         * the BSP performs this step, briefly during boot.
         */
        for (i = 0; i < late_irq_cnt; i++)
                __xen_register_percpu_irq(smp_processor_id(),
                                          saved_percpu_irqs[i].irq,
                                          saved_percpu_irqs[i].action, 0);
}

/* FIXME: There's no obvious way to check whether slab is ready, so as
 * a hack we piggyback on the late_time_init hook.
 */

#ifdef CONFIG_HOTPLUG_CPU
static int __devinit
unbind_evtchn_callback(struct notifier_block *nfb,
                       unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        if (action == CPU_DEAD) {
                /* Unregister evtchn.  */
                if (per_cpu(xen_cpep_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
                                               NULL);
                        per_cpu(xen_cpep_irq, cpu) = -1;
                }
                if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
                                               NULL);
                        per_cpu(xen_cmcp_irq, cpu) = -1;
                }
                if (per_cpu(xen_cmc_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
                        per_cpu(xen_cmc_irq, cpu) = -1;
                }
                if (per_cpu(xen_ipi_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
                        per_cpu(xen_ipi_irq, cpu) = -1;
                }
                if (per_cpu(xen_resched_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
                                               NULL);
                        per_cpu(xen_resched_irq, cpu) = -1;
                }
                if (per_cpu(xen_timer_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
                                               NULL);
                        per_cpu(xen_timer_irq, cpu) = -1;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block unbind_evtchn_notifier = {
        .notifier_call = unbind_evtchn_callback,
        .priority = 0
};
#endif

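/*
 * Register on @cpu the percpu irqs that were cached while the BSP
 * registered its own.  Called from xen_platform_send_ipi() below when
 * an AP is woken up.
 */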
void xen_smp_intr_init_early(unsigned int cpu)
{
#ifdef CONFIG_SMP
        unsigned int i;

        for (i = 0; i < saved_irq_cnt; i++)
                __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
                                          saved_percpu_irqs[i].action, 0);
#endif
}

void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
        unsigned int cpu = smp_processor_id();
        struct callback_register event = {
                .type = CALLBACKTYPE_event,
                .address = { .ip = (unsigned long)&xen_event_callback },
        };

        if (cpu == 0) {
                /* Initialization was already done for the boot cpu.  */
#ifdef CONFIG_HOTPLUG_CPU
                /* Register the notifier only once.  */
                register_cpu_notifier(&unbind_evtchn_notifier);
#endif
                return;
        }

        /* This should be piggybacked onto vcpu guest context setup. */
        BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
#endif /* CONFIG_SMP */
}

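/*
 * Register the event-channel callback with the hypervisor and arrange,
 * via the late_time_init hook, for the percpu irqs cached before slab
 * was ready to be bound.
 */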
void __init
xen_irq_init(void)
{
        struct callback_register event = {
                .type = CALLBACKTYPE_event,
                .address = { .ip = (unsigned long)&xen_event_callback },
        };

        xen_init_IRQ();
        BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
        late_time_init = xen_bind_early_percpu_irq;
}

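/*
 * Translate an ia64 IPI vector into the corresponding xen event
 * channel and send it to @cpu.  The ap_wakeup_vector case additionally
 * brings up the target AP's percpu irqs first.
 */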
void
xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
{
#ifdef CONFIG_SMP
        /* TODO: we need to call vcpu_up here */
        if (unlikely(vector == ap_wakeup_vector)) {
                /* XXX
                 * This should be in __cpu_up(cpu) in ia64 smpboot.c,
                 * like on x86, but we don't want to modify smpboot.c,
                 * so keep it here instead.
                 */
                xen_smp_intr_init_early(cpu);

                xen_send_ipi(cpu, vector);
                /* vcpu_prepare_and_up(cpu); */
                return;
        }
#endif

        switch (vector) {
        case IA64_IPI_VECTOR:
                xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
                break;
        case IA64_IPI_RESCHEDULE:
                xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
                break;
        case IA64_CMCP_VECTOR:
                xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
                break;
        case IA64_CPEP_VECTOR:
                xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
                break;
        case IA64_TIMER_VECTOR: {
                /* this is used only once by check_sal_cache_flush()
                   at boot time */
                static int used;

                if (!used) {
                        xen_send_ipi(cpu, IA64_TIMER_VECTOR);
                        used = 1;
                        break;
                }
                /* fallthrough */
        }
        default:
                printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
                       vector);
                notify_remote_via_irq(0); /* default to irq 0 */
                break;
        }
}

static void __init
xen_register_ipi(void)
{
#ifdef CONFIG_SMP
        register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
        register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
        register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
#endif
}

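/*
 * pv_irq_ops resend hook: retrigger the irq through its xen event
 * channel.
 */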
static void
xen_resend_irq(unsigned int vector)
{
        (void)resend_irq_on_evtchn(vector);
}

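/*
 * The xen irq operations handed to the ia64 paravirt infrastructure;
 * they replace the native vector-based operations when running on xen.
 */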
const struct pv_irq_ops xen_irq_ops __initdata = {
        .register_ipi = xen_register_ipi,

        .assign_irq_vector = xen_assign_irq_vector,
        .free_irq_vector = xen_free_irq_vector,
        .register_percpu_irq = xen_register_percpu_irq,

        .resend_irq = xen_resend_irq,
};