linux/arch/ia64/sn/kernel/irq.c
/*
 * Platform dependent support for SGI SN
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_feature_sets.h>

static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);

int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
struct list_head **sn_irq_lh;
static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */

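/*
 * Ask the PROM, via a SAL call, to allocate an interrupt on the
 * bridge identified by (local_nasid, local_widget), targeted at
 * req_nasid/req_slice.  Returns the SAL status word: 0 on success,
 * nonzero on failure.
 */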
u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
                  struct sn_irq_info *sn_irq_info,
                  int req_irq, nasid_t req_nasid,
                  int req_slice)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_ALLOC, (u64) local_nasid,
                        (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
                        (u64) req_nasid, (u64) req_slice);

        return ret_stuff.status;
}

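/*
 * Tell the PROM to release an interrupt previously obtained with
 * sn_intr_alloc().
 */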
void sn_intr_free(nasid_t local_nasid, int local_widget,
                  struct sn_irq_info *sn_irq_info)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_FREE, (u64) local_nasid,
                        (u64) local_widget, (u64) sn_irq_info->irq_irq,
                        (u64) sn_irq_info->irq_cookie, 0, 0);
}

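/*
 * Ask the PROM to retarget an already-allocated interrupt at
 * req_nasid/req_slice in place.  Newer PROMs support this
 * SAL_INTR_REDIRECT call; on older ones it fails and the caller
 * falls back to a free/realloc cycle (see sn_retarget_vector()).
 */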
u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
                     struct sn_irq_info *sn_irq_info,
                     nasid_t req_nasid, int req_slice)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_REDIRECT, (u64) local_nasid,
                        (u64) local_widget, __pa(sn_irq_info),
                        (u64) req_nasid, (u64) req_slice, 0);

        return ret_stuff.status;
}

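/*
 * Hub interrupts need no per-irq startup/shutdown work here; the
 * actual setup appears to be done by the PROM and the PCI bridge
 * providers, so these irq_chip hooks are intentionally no-ops.
 */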
static unsigned int sn_startup_irq(unsigned int irq)
{
        return 0;
}

static void sn_shutdown_irq(unsigned int irq)
{
}

extern void ia64_mca_register_cpev(int);

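/*
 * The only vector this layer can really gate is the corrected
 * platform error (CPE) interrupt: disabling it deregisters the CPE
 * handler with the MCA code, enabling it re-registers it.
 */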
static void sn_disable_irq(unsigned int irq)
{
        if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
                ia64_mca_register_cpev(0);
}

static void sn_enable_irq(unsigned int irq)
{
        if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
                ia64_mca_register_cpev(irq);
}

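/*
 * Acknowledge a hub interrupt: clear the pending bits in the SHub
 * event-occurred MMR (a store to the alias register clears the bits
 * written) and mark the vector in-service in the per-cpu pda so that
 * sn_check_intr() knows it is being handled.
 */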
static void sn_ack_irq(unsigned int irq)
{
        u64 event_occurred, mask;

        irq = irq & 0xff;
        event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
        mask = event_occurred & SH_ALL_INT_MASK;
        HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
        __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

        move_native_irq(irq);
}

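/*
 * End-of-interrupt processing: clear the in-service bit and, when
 * sn_force_interrupt_flag is set, retrigger any interrupt that may
 * have been dropped while this one was being serviced.
 */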
static void sn_end_irq(unsigned int irq)
{
        int ivec;
        u64 event_occurred;

        ivec = irq & 0xff;
        if (ivec == SGI_UART_VECTOR) {
                event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
                /* If the UART bit is set here, we may have received an
                 * interrupt from the UART that the driver missed.  To
                 * make sure, we IPI ourselves to force us to look again.
                 */
                if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
                        platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
                                          IA64_IPI_DM_INT, 0);
                }
        }
        __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
        if (sn_force_interrupt_flag)
                force_interrupt(irq);
}

static void sn_irq_info_free(struct rcu_head *head);

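/*
 * Retarget a device interrupt at a new nasid/slice.  The fast path
 * asks the PROM to redirect the existing interrupt in place; if the
 * PROM cannot (SAL_INTR_REDIRECT unsupported or failed), a new PROM
 * interrupt structure is allocated for the new target and swapped
 * into the per-irq list under RCU, with the old one freed after a
 * grace period.  Returns the active sn_irq_info, or NULL on failure.
 */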
struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
                                       nasid_t nasid, int slice)
{
        int vector;
        int cpuid;
#ifdef CONFIG_SMP
        int cpuphys;
#endif
        int64_t bridge;
        int local_widget, status;
        nasid_t local_nasid;
        struct sn_irq_info *new_irq_info;
        struct sn_pcibus_provider *pci_provider;

        bridge = (u64) sn_irq_info->irq_bridge;
        if (!bridge) {
                return NULL; /* irq is not a device interrupt */
        }

        local_nasid = NASID_GET(bridge);

        if (local_nasid & 1)
                local_widget = TIO_SWIN_WIDGETNUM(bridge);
        else
                local_widget = SWIN_WIDGETNUM(bridge);
        vector = sn_irq_info->irq_irq;

        /* Make use of SAL_INTR_REDIRECT if PROM supports it */
        status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info,
                                  nasid, slice);
        if (!status) {
                new_irq_info = sn_irq_info;
                goto finish_up;
        }

        /*
         * PROM does not support SAL_INTR_REDIRECT, or it failed.
         * Revert to the old method.
         */
        new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
        if (new_irq_info == NULL)
                return NULL;

        memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

        /* Free the old PROM new_irq_info structure */
        sn_intr_free(local_nasid, local_widget, new_irq_info);
        unregister_intr_pda(new_irq_info);

        /* allocate a new PROM new_irq_info struct */
        status = sn_intr_alloc(local_nasid, local_widget,
                               new_irq_info, vector,
                               nasid, slice);

        /* SAL call failed */
        if (status) {
                kfree(new_irq_info);
                return NULL;
        }

        register_intr_pda(new_irq_info);
        spin_lock(&sn_irq_info_lock);
        list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
        spin_unlock(&sn_irq_info_lock);
        call_rcu(&sn_irq_info->rcu, sn_irq_info_free);

finish_up:
        /* Update the kernel's new_irq_info with the new target info */
        cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
                                     new_irq_info->irq_slice);
        new_irq_info->irq_cpuid = cpuid;

        pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];

        /*
         * If this represents a line interrupt, target it.  If it's
         * an MSI (irq_int_bit < 0), it's already targeted.
         */
        if (new_irq_info->irq_int_bit >= 0 &&
            pci_provider && pci_provider->target_interrupt)
                (pci_provider->target_interrupt)(new_irq_info);

#ifdef CONFIG_SMP
        cpuphys = cpu_physical_id(cpuid);
        set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif

        return new_irq_info;
}

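/*
 * irq_chip set_affinity hook: retarget every device interrupt
 * registered on this irq at the first cpu in the requested mask.
 */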
static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
        struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
        nasid_t nasid;
        int slice;

        nasid = cpuid_to_nasid(cpumask_first(mask));
        slice = cpuid_to_slice(cpumask_first(mask));

        list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
                                 sn_irq_lh[irq], list)
                (void)sn_retarget_vector(sn_irq_info, nasid, slice);

        return 0;
}

#ifdef CONFIG_SMP
void sn_set_err_irq_affinity(unsigned int irq)
{
        /*
         * On systems which support CPU disabling (SHub2), all error interrupts
         * are targeted at the boot CPU.
         */
        if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
                set_irq_affinity_info(irq, cpu_physical_id(0), 0);
}
#else
void sn_set_err_irq_affinity(unsigned int irq) { }
#endif

static void
sn_mask_irq(unsigned int irq)
{
}

static void
sn_unmask_irq(unsigned int irq)
{
}

struct irq_chip irq_type_sn = {
        .name           = "SN hub",
        .startup        = sn_startup_irq,
        .shutdown       = sn_shutdown_irq,
        .enable         = sn_enable_irq,
        .disable        = sn_disable_irq,
        .ack            = sn_ack_irq,
        .end            = sn_end_irq,
        .mask           = sn_mask_irq,
        .unmask         = sn_unmask_irq,
        .set_affinity   = sn_set_affinity_irq
};

ia64_vector sn_irq_to_vector(int irq)
{
        if (irq >= IA64_NUM_VECTORS)
                return 0;
        return (ia64_vector)irq;
}

unsigned int sn_local_vector_to_irq(u8 vector)
{
        return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}

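/*
 * Claim the SN2 device-vector range and make irq_type_sn the chip for
 * every irq descriptor that no other chip has claimed yet.
 */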
void sn_irq_init(void)
{
        int i;
        struct irq_desc *base_desc = irq_desc;

        ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
        ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;

        for (i = 0; i < NR_IRQS; i++) {
                if (base_desc[i].chip == &no_irq_chip) {
                        base_desc[i].chip = &irq_type_sn;
                }
        }
}

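/*
 * Grow the per-cpu [sn_first_irq, sn_last_irq] window to cover this
 * irq; sn_lb_int_war_check() scans exactly that window.
 */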
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;

        if (pdacpu(cpu)->sn_last_irq < irq) {
                pdacpu(cpu)->sn_last_irq = irq;
        }

        if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
                pdacpu(cpu)->sn_first_irq = irq;
}

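/*
 * Shrink the per-cpu irq window after an interrupt goes away: if this
 * irq was a boundary, scan down (for sn_last_irq) or up (for
 * sn_first_irq) until another irq registered on this cpu is found.
 */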
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;
        struct sn_irq_info *tmp_irq_info;
        int i, foundmatch;

        rcu_read_lock();
        if (pdacpu(cpu)->sn_last_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_last_irq - 1;
                     i && !foundmatch; i--) {
                        list_for_each_entry_rcu(tmp_irq_info,
                                                sn_irq_lh[i],
                                                list) {
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch = 1;
                                        break;
                                }
                        }
                }
                pdacpu(cpu)->sn_last_irq = i;
        }

        if (pdacpu(cpu)->sn_first_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_first_irq + 1;
                     i < NR_IRQS && !foundmatch; i++) {
                        list_for_each_entry_rcu(tmp_irq_info,
                                                sn_irq_lh[i],
                                                list) {
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch = 1;
                                        break;
                                }
                        }
                }
                pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
        }
        rcu_read_unlock();
}

static void sn_irq_info_free(struct rcu_head *head)
{
        struct sn_irq_info *sn_irq_info;

        sn_irq_info = container_of(head, struct sn_irq_info, rcu);
        kfree(sn_irq_info);
}

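/*
 * Bind a newly set-up interrupt to its pci_dev: record the target cpu,
 * link the sn_irq_info into the per-irq list (readers walk it under
 * RCU), reserve the vector, and note in the irq descriptor that the
 * PROM already chose the affinity.
 */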
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
        nasid_t nasid = sn_irq_info->irq_nasid;
        int slice = sn_irq_info->irq_slice;
        int cpu = nasid_slice_to_cpuid(nasid, slice);
#ifdef CONFIG_SMP
        int cpuphys;
        struct irq_desc *desc;
#endif

        pci_dev_get(pci_dev);
        sn_irq_info->irq_cpuid = cpu;
        sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);

        /* link it into the sn_irq[irq] list */
        spin_lock(&sn_irq_info_lock);
        list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
        reserve_irq_vector(sn_irq_info->irq_irq);
        spin_unlock(&sn_irq_info_lock);

        register_intr_pda(sn_irq_info);
#ifdef CONFIG_SMP
        cpuphys = cpu_physical_id(cpu);
        set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
        desc = irq_to_desc(sn_irq_info->irq_irq);
        /*
         * Affinity was set by the PROM, prevent it from
         * being reset by the request_irq() path.
         */
        desc->status |= IRQ_AFFINITY_SET;
#endif
}

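/*
 * Undo sn_irq_fixup() when a device goes away: drop the sn_irq_info
 * from the per-irq list, release the vector if the list is now empty,
 * and free the structure after an RCU grace period.
 */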
void sn_irq_unfixup(struct pci_dev *pci_dev)
{
        struct sn_irq_info *sn_irq_info;

        /* Only clean up IRQ stuff if this device has a host bus context */
        if (!SN_PCIDEV_BUSSOFT(pci_dev))
                return;

        sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
        if (!sn_irq_info)
                return;
        if (!sn_irq_info->irq_irq) {
                kfree(sn_irq_info);
                return;
        }

        unregister_intr_pda(sn_irq_info);
        spin_lock(&sn_irq_info_lock);
        list_del_rcu(&sn_irq_info->list);
        spin_unlock(&sn_irq_info_lock);
        if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
                free_irq_vector(sn_irq_info->irq_irq);
        call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
        pci_dev_put(pci_dev);
}

static inline void
sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
{
        struct sn_pcibus_provider *pci_provider;

        pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];

        /* Don't force an interrupt if the irq has been disabled */
        if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) &&
            pci_provider && pci_provider->force_interrupt)
                (*pci_provider->force_interrupt)(sn_irq_info);
}

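/*
 * Ask each bridge provider to re-send any interrupt registered on
 * this irq, in case the original delivery was lost.
 */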
static void force_interrupt(int irq)
{
        struct sn_irq_info *sn_irq_info;

        if (!sn_ioif_inited)
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
                sn_call_force_intr_provider(sn_irq_info);

        rcu_read_unlock();
}

/*
 * Check for lost interrupts.  If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost.  Force an interrupt on that pin.  It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
        u64 regval;
        struct pcidev_info *pcidev_info;
        struct pcibus_info *pcibus_info;

        /*
         * Bridge types attached to TIO (anything but PIC) do not need this WAR
         * since they do not target Shub II interrupt registers.  If that
         * ever changes, this check needs to accommodate it.
         */
        if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
                return;

        pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
        if (!pcidev_info)
                return;

        pcibus_info =
            (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
            pdi_pcibus_info;
        regval = pcireg_intr_status_get(pcibus_info);

        if (!ia64_get_irr(irq_to_vector(irq))) {
                if (!test_bit(irq, pda->sn_in_service_ivecs)) {
                        regval &= 0xff;
                        if (sn_irq_info->irq_int_bit & regval &
                            sn_irq_info->irq_last_intr) {
                                regval &= ~(sn_irq_info->irq_int_bit & regval);
                                sn_call_force_intr_provider(sn_irq_info);
                        }
                }
        }
        sn_irq_info->irq_last_intr = regval;
}

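/*
 * Walk this cpu's registered irq window and run the lost-interrupt
 * check above on every device interrupt in it.
 */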
void sn_lb_int_war_check(void)
{
        struct sn_irq_info *sn_irq_info;
        int i;

        if (!sn_ioif_inited || pda->sn_first_irq == 0)
                return;

        rcu_read_lock();
        for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
                list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
                        sn_check_intr(i, sn_irq_info);
                }
        }
        rcu_read_unlock();
}

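/*
 * Allocate and initialize the per-irq list heads that anchor every
 * sn_irq_info on the system; runs once at init time.
 */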
void __init sn_irq_lh_init(void)
{
        int i;

        sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
        if (!sn_irq_lh)
                panic("SN PCI INIT: Failed to allocate memory for PCI init\n");

        for (i = 0; i < NR_IRQS; i++) {
                sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
                if (!sn_irq_lh[i])
                        panic("SN PCI INIT: Failed IRQ memory allocation\n");

                INIT_LIST_HEAD(sn_irq_lh[i]);
        }
}