// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *      Stephane Eranian <eranian@hpl.hp.com>
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *           support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *                      PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *                                              Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>
#include <linux/pgtable.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/tlbflush.h>

#define IRQ_DEBUG       0

#define IRQ_VECTOR_UNASSIGNED   (0)

#define IRQ_UNUSED              (0)
#define IRQ_USED                (1)
#define IRQ_RSVD                (2)

int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
                               (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
        /* 8259 IRQ translation, first 16 entries */
        0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
        0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
        [0 ... NR_IRQS - 1] = {
                .vector = IRQ_VECTOR_UNASSIGNED,
                .domain = CPU_MASK_NONE
        }
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
        [0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
        [0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
        [0 ... NR_IRQS - 1] = IRQ_UNUSED
};

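/*
 * Find the first IRQ in the device-vector range whose slot is neither
 * in use nor reserved.  Returns the IRQ number or -ENOSPC.
 */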
static inline int find_unassigned_irq(void)
{
        int irq;

        for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
                if (irq_status[irq] == IRQ_UNUSED)
                        return irq;
        return -ENOSPC;
}

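/*
 * Find a device vector that is still unbound on every CPU of @domain:
 * a vector qualifies iff vector_table[vector] shares no CPU with the
 * domain.  Returns a vector number or a negative errno.  Must be
 * called with vector_lock held.
 */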
static inline int find_unassigned_vector(cpumask_t domain)
{
        cpumask_t mask;
        int pos, vector;

        cpumask_and(&mask, &domain, cpu_online_mask);
        if (cpumask_empty(&mask))
                return -EINVAL;

        for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
                vector = IA64_FIRST_DEVICE_VECTOR + pos;
                cpumask_and(&mask, &domain, &vector_table[vector]);
                if (!cpumask_empty(&mask))
                        continue;
                return vector;
        }
        return -ENOSPC;
}

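/*
 * Bind @irq to @vector on all online CPUs of @domain: point each CPU's
 * vector_irq[] slot at the IRQ, record the mapping in irq_cfg[] and
 * mark the vector busy for the domain in vector_table[].  Rebinding an
 * identical mapping succeeds; an IRQ that already has a different
 * vector yields -EBUSY.  Must be called with vector_lock held.
 */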
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
        cpumask_t mask;
        int cpu;
        struct irq_cfg *cfg = &irq_cfg[irq];

        BUG_ON((unsigned)irq >= NR_IRQS);
        BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

        cpumask_and(&mask, &domain, cpu_online_mask);
        if (cpumask_empty(&mask))
                return -EINVAL;
        if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
                return 0;
        if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
                return -EBUSY;
        for_each_cpu(cpu, &mask)
                per_cpu(vector_irq, cpu)[vector] = irq;
        cfg->vector = vector;
        cfg->domain = domain;
        irq_status[irq] = IRQ_USED;
        cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
        return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&vector_lock, flags);
        ret = __bind_irq_vector(irq, vector, domain);
        spin_unlock_irqrestore(&vector_lock, flags);
        return ret;
}

static void __clear_irq_vector(int irq)
{
        int vector, cpu;
        cpumask_t domain;
        struct irq_cfg *cfg = &irq_cfg[irq];

        BUG_ON((unsigned)irq >= NR_IRQS);
        BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
        vector = cfg->vector;
        domain = cfg->domain;
        for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
                per_cpu(vector_irq, cpu)[vector] = -1;
        cfg->vector = IRQ_VECTOR_UNASSIGNED;
        cfg->domain = CPU_MASK_NONE;
        irq_status[irq] = IRQ_UNUSED;
        cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
}

static void clear_irq_vector(int irq)
{
        unsigned long flags;

        spin_lock_irqsave(&vector_lock, flags);
        __clear_irq_vector(irq);
        spin_unlock_irqrestore(&vector_lock, flags);
}

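/*
 * Allocate a free device vector for @irq; with AUTO_ASSIGN the IRQ
 * number is taken from the vector itself.  Each online CPU's
 * allocation domain is tried in turn and the first unassigned vector
 * found is bound.  Returns the vector or -ENOSPC.
 */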
int
ia64_native_assign_irq_vector (int irq)
{
        unsigned long flags;
        int vector, cpu;
        cpumask_t domain = CPU_MASK_NONE;

        vector = -ENOSPC;

        spin_lock_irqsave(&vector_lock, flags);
        for_each_online_cpu(cpu) {
                domain = vector_allocation_domain(cpu);
                vector = find_unassigned_vector(domain);
                if (vector >= 0)
                        break;
        }
        if (vector < 0)
                goto out;
        if (irq == AUTO_ASSIGN)
                irq = vector;
        BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
        spin_unlock_irqrestore(&vector_lock, flags);
        return vector;
}

void
ia64_native_free_irq_vector (int vector)
{
        if (vector < IA64_FIRST_DEVICE_VECTOR ||
            vector > IA64_LAST_DEVICE_VECTOR)
                return;
        clear_irq_vector(vector);
}

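/*
 * Reserve a device vector by binding it one-to-one to the equally
 * numbered IRQ across all CPUs.  Returns 0 on success and, note, 1
 * rather than a negative errno on failure.
 */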
int
reserve_irq_vector (int vector)
{
        if (vector < IA64_FIRST_DEVICE_VECTOR ||
            vector > IA64_LAST_DEVICE_VECTOR)
                return -EINVAL;
        return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
        int irq, vector;

        /* Clear vector_irq */
        for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
                per_cpu(vector_irq, cpu)[vector] = -1;
        /* Mark the in-use vectors */
        for (irq = 0; irq < NR_IRQS; ++irq) {
                if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
                        continue;
                vector = irq_to_vector(irq);
                per_cpu(vector_irq, cpu)[vector] = irq;
        }
}

#ifdef CONFIG_SMP

static enum vector_domain_type {
        VECTOR_DOMAIN_NONE,
        VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
        if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
                return *cpumask_of(cpu);
        return CPU_MASK_ALL;
}

static int __irq_prepare_move(int irq, int cpu)
{
        struct irq_cfg *cfg = &irq_cfg[irq];
        int vector;
        cpumask_t domain;

        if (cfg->move_in_progress || cfg->move_cleanup_count)
                return -EBUSY;
        if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
                return -EINVAL;
        if (cpumask_test_cpu(cpu, &cfg->domain))
                return 0;
        domain = vector_allocation_domain(cpu);
        vector = find_unassigned_vector(domain);
        if (vector < 0)
                return -ENOSPC;
        cfg->move_in_progress = 1;
        cfg->old_domain = cfg->domain;
        cfg->vector = IRQ_VECTOR_UNASSIGNED;
        cfg->domain = CPU_MASK_NONE;
        BUG_ON(__bind_irq_vector(irq, vector, domain));
        return 0;
}

int irq_prepare_move(int irq, int cpu)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&vector_lock, flags);
        ret = __irq_prepare_move(irq, cpu);
        spin_unlock_irqrestore(&vector_lock, flags);
        return ret;
}

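/*
 * Second half of an IRQ migration: once an interrupt for this IRQ has
 * arrived outside the old domain, ask every online CPU of the old
 * domain (via IA64_IRQ_MOVE_VECTOR) to release its stale vector_irq[]
 * entry.  Runs in interrupt context, so the common no-move case must
 * stay cheap.
 */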
void irq_complete_move(unsigned irq)
{
        struct irq_cfg *cfg = &irq_cfg[irq];
        cpumask_t cleanup_mask;
        int i;

        if (likely(!cfg->move_in_progress))
                return;

        if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
                return;

        cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
        cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
        for_each_cpu(i, &cleanup_mask)
                ia64_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
        cfg->move_in_progress = 0;
}

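/*
 * Handler for IA64_IRQ_MOVE_VECTOR: walk this CPU's device vectors and
 * retire the ones that are only still bound here because of a
 * migration whose cleanup irq_complete_move() has requested.
 */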
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
        int me = smp_processor_id();
        ia64_vector vector;
        unsigned long flags;

        for (vector = IA64_FIRST_DEVICE_VECTOR;
             vector < IA64_LAST_DEVICE_VECTOR; vector++) {
                int irq;
                struct irq_desc *desc;
                struct irq_cfg *cfg;
                irq = __this_cpu_read(vector_irq[vector]);
                if (irq < 0)
                        continue;

                desc = irq_to_desc(irq);
                cfg = irq_cfg + irq;
                raw_spin_lock(&desc->lock);
                if (!cfg->move_cleanup_count)
                        goto unlock;

                if (!cpumask_test_cpu(me, &cfg->old_domain))
                        goto unlock;

                spin_lock_irqsave(&vector_lock, flags);
                __this_cpu_write(vector_irq[vector], -1);
                cpumask_clear_cpu(me, &vector_table[vector]);
                spin_unlock_irqrestore(&vector_lock, flags);
                cfg->move_cleanup_count--;
        unlock:
                raw_spin_unlock(&desc->lock);
        }
        return IRQ_HANDLED;
}

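/*
 * Boot option "vector=percpu" switches vector allocation from a single
 * global domain (CPU_MASK_ALL) to one domain per CPU and disables
 * interrupt routing.
 */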
static int __init parse_vector_domain(char *arg)
{
        if (!arg)
                return -EINVAL;
        if (!strcmp(arg, "percpu")) {
                vector_domain_type = VECTOR_DOMAIN_PERCPU;
                no_int_routing = 1;
        }
        return 0;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
        return CPU_MASK_ALL;
}
#endif


void destroy_and_reserve_irq(unsigned int irq)
{
        unsigned long flags;

        irq_init_desc(irq);
        spin_lock_irqsave(&vector_lock, flags);
        __clear_irq_vector(irq);
        irq_status[irq] = IRQ_RSVD;
        spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic IRQ allocation and deallocation for MSI
 */
int create_irq(void)
{
        unsigned long flags;
        int irq, vector, cpu;
        cpumask_t domain = CPU_MASK_NONE;

        irq = vector = -ENOSPC;
        spin_lock_irqsave(&vector_lock, flags);
        for_each_online_cpu(cpu) {
                domain = vector_allocation_domain(cpu);
                vector = find_unassigned_vector(domain);
                if (vector >= 0)
                        break;
        }
        if (vector < 0)
                goto out;
        irq = find_unassigned_irq();
        if (irq < 0)
                goto out;
        BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
        spin_unlock_irqrestore(&vector_lock, flags);
        if (irq >= 0)
                irq_init_desc(irq);
        return irq;
}

void destroy_irq(unsigned int irq)
{
        irq_init_desc(irq);
        clear_irq_vector(irq);
}
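
/*
 * Usage sketch (illustrative, not part of the original source): an MSI
 * setup path would pair the two calls roughly like
 *
 *      irq = create_irq();
 *      if (irq < 0)
 *              return irq;     (no free IRQ/vector pair)
 *      ... program the MSI address/data using irq_to_vector(irq) ...
 *      destroy_irq(irq);       (on teardown)
 */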

#ifdef CONFIG_SMP
#       define IS_RESCHEDULE(vec)       (vec == IA64_IPI_RESCHEDULE)
#       define IS_LOCAL_TLB_FLUSH(vec)  (vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#       define IS_RESCHEDULE(vec)       (0)
#       define IS_LOCAL_TLB_FLUSH(vec)  (0)
#endif
/*
 * This is where the IVT branches when we get an external interrupt.
 * It dispatches to the correct hardware IRQ handler via a function
 * pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned long saved_tpr;

#if IRQ_DEBUG
        {
                unsigned long bsp, sp;

                /*
                 * Note: if the interrupt happened while executing in
                 * the context switch routine (ia64_switch_to), we may
                 * get a spurious stack overflow here.  This is
                 * because the register and the memory stack are not
                 * switched atomically.
                 */
                bsp = ia64_getreg(_IA64_REG_AR_BSP);
                sp = ia64_getreg(_IA64_REG_SP);

                if ((sp - bsp) < 1024) {
                        static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

                        if (__ratelimit(&ratelimit)) {
                                printk("ia64_handle_irq: DANGER: less than "
                                       "1KB of free stack space!!\n"
                                       "(bsp=0x%lx, sp=0x%lx)\n", bsp, sp);
                        }
                }
        }
#endif /* IRQ_DEBUG */

        /*
         * Always set TPR to limit maximum interrupt nesting depth to
         * 16 (without this, it would be ~240, which could easily lead
         * to kernel stack overflows).
         */
        irq_enter();
        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
        ia64_srlz_d();
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
                int irq = local_vector_to_irq(vector);

                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
                        kstat_incr_irq_this_cpu(irq);
                } else if (unlikely(IS_RESCHEDULE(vector))) {
                        scheduler_ipi();
                        kstat_incr_irq_this_cpu(irq);
                } else {
                        ia64_setreg(_IA64_REG_CR_TPR, vector);
                        ia64_srlz_d();

                        if (unlikely(irq < 0)) {
                                printk(KERN_ERR "%s: Unexpected interrupt "
                                       "vector %d on CPU %d is not mapped "
                                       "to any IRQ!\n", __func__, vector,
                                       smp_processor_id());
                        } else
                                generic_handle_irq(irq);

                        /*
                         * Disable interrupts and send EOI:
                         */
                        local_irq_disable();
                        ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
                }
                ia64_eoi();
                vector = ia64_get_ivr();
        }
        /*
         * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
         * handler needs to be able to wait for further keyboard interrupts, which can't
         * come through until ia64_eoi() has been done.
         */
        irq_exit();
        set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to
 * be brought down.
 */
void ia64_process_pending_intr(void)
{
        ia64_vector vector;
        unsigned long saved_tpr;
        extern unsigned int vectors_in_migration[NR_IRQS];

        vector = ia64_get_ivr();

        irq_enter();
        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
        ia64_srlz_d();

        /*
         * Perform normal interrupt style processing
         */
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
                int irq = local_vector_to_irq(vector);

                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
                        kstat_incr_irq_this_cpu(irq);
                } else if (unlikely(IS_RESCHEDULE(vector))) {
                        kstat_incr_irq_this_cpu(irq);
                } else {
                        struct pt_regs *old_regs = set_irq_regs(NULL);

                        ia64_setreg(_IA64_REG_CR_TPR, vector);
                        ia64_srlz_d();

                        /*
                         * Handle the interrupt the same way
                         * ia64_handle_irq() would have from a real
                         * interrupt handler, but with a NULL pt_regs.
                         * This could probably share code with that
                         * path.
                         */
                        if (unlikely(irq < 0)) {
                                printk(KERN_ERR "%s: Unexpected interrupt "
                                       "vector %d on CPU %d not being mapped "
                                       "to any IRQ!!\n", __func__, vector,
                                       smp_processor_id());
                        } else {
                                vectors_in_migration[irq] = 0;
                                generic_handle_irq(irq);
                        }
                        set_irq_regs(old_regs);

                        /*
                         * Disable interrupts and send EOI
                         */
                        local_irq_disable();
                        ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
                }
                ia64_eoi();
                vector = ia64_get_ivr();
        }
        irq_exit();
}
#endif


#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
        BUG();
        return IRQ_NONE;
}

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */

#endif

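/*
 * Bind @vec one-to-one to the equally numbered IRQ on all CPUs, mark
 * it per-CPU, attach the LSAPIC irq_chip and, if @handler is non-NULL,
 * request it.  Used for IPIs and other vectors that every CPU must
 * handle locally.
 */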
void
register_percpu_irq(ia64_vector vec, irq_handler_t handler, unsigned long flags,
                    const char *name)
{
        unsigned int irq;

        irq = vec;
        BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
        irq_set_status_flags(irq, IRQ_PER_CPU);
        irq_set_chip(irq, &irq_type_ia64_lsapic);
        if (handler)
                if (request_irq(irq, handler, flags, name, NULL))
                        pr_err("Failed to request irq %u (%s)\n", irq, name);
        irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
        register_percpu_irq(IA64_IPI_VECTOR, handle_IPI, 0, "IPI");
        register_percpu_irq(IA64_IPI_RESCHEDULE, dummy_handler, 0, "resched");
        register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, dummy_handler, 0,
                            "tlb_flush");
#endif
}

void __init
init_IRQ (void)
{
        acpi_boot_init();
        ia64_register_ipi();
        register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL, 0, NULL);
#ifdef CONFIG_SMP
        if (vector_domain_type != VECTOR_DOMAIN_NONE) {
                register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
                                    smp_irq_move_cleanup_interrupt, 0,
                                    "irq_move");
        }
#endif
}

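/*
 * Worked example (illustrative values): the processor interrupt block
 * decodes the target from the address and the vector/delivery mode
 * from the data.  With phys_cpu_id = 0x0102 (ID 0x01, EID 0x02),
 * vector 0xd0, delivery_mode 0 and redirect 0, the write below stores
 * ipi_data = 0xd0 to ipi_base_addr + (0x0102 << 4).
 */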
void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
        void __iomem *ipi_addr;
        unsigned long ipi_data;
        unsigned long phys_cpu_id;

        phys_cpu_id = cpu_physical_id(cpu);

        /*
         * The CPU number is encoded as an 8-bit ID and an 8-bit EID.
         */

        ipi_data = (delivery_mode << 8) | (vector & 0xff);
        ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

        writeq(ipi_data, ipi_addr);
}