linux/arch/ia64/sn/kernel/irq.c
/*
 * Platform dependent support for SGI SN
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_feature_sets.h>
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);

extern int sn_ioif_inited;
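/*
 * Per-IRQ lists of sn_irq_info structures: one list head per IRQ number.
 * Writers take sn_irq_info_lock; readers traverse the lists under RCU.
 */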
struct list_head **sn_irq_lh;
static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */

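/*
 * Ask the PROM (via SAL) to allocate an interrupt targeted at the
 * requested nasid/slice and fill in *sn_irq_info.  Returns the SAL
 * status word; zero indicates success.
 */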
u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
                  struct sn_irq_info *sn_irq_info,
                  int req_irq, nasid_t req_nasid,
                  int req_slice)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_ALLOC, (u64) local_nasid,
                        (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
                        (u64) req_nasid, (u64) req_slice);

        return ret_stuff.status;
}

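/* Tell the PROM to release a previously allocated interrupt resource. */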
void sn_intr_free(nasid_t local_nasid, int local_widget,
                  struct sn_irq_info *sn_irq_info)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_FREE, (u64) local_nasid,
                        (u64) local_widget, (u64) sn_irq_info->irq_irq,
                        (u64) sn_irq_info->irq_cookie, 0, 0);
}

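/*
 * Ask the PROM to retarget an already-allocated interrupt to a new
 * nasid/slice.  Returns nonzero if the PROM does not support
 * SAL_INTR_REDIRECT or if the call fails.
 */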
u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
                     struct sn_irq_info *sn_irq_info,
                     nasid_t req_nasid, int req_slice)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_REDIRECT, (u64) local_nasid,
                        (u64) local_widget, __pa(sn_irq_info),
                        (u64) req_nasid, (u64) req_slice, 0);

        return ret_stuff.status;
}

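/* Startup and shutdown are no-ops for SN hub interrupts. */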
static unsigned int sn_startup_irq(struct irq_data *data)
{
        return 0;
}

static void sn_shutdown_irq(struct irq_data *data)
{
}

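/*
 * Disabling/enabling the corrected-platform-error (CPE) interrupt is
 * done by (de)registering the CPE vector with the MCA layer; all other
 * IRQs need no enable/disable action here.
 */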
extern void ia64_mca_register_cpev(int);

static void sn_disable_irq(struct irq_data *data)
{
        if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
                ia64_mca_register_cpev(0);
}

static void sn_enable_irq(struct irq_data *data)
{
        if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
                ia64_mca_register_cpev(data->irq);
}

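/*
 * Acknowledge an interrupt: write the pending event bits back through
 * the SH_EVENT_OCCURRED alias register (which clears them on SHub) and
 * mark the vector as in-service in the per-CPU data.
 */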
static void sn_ack_irq(struct irq_data *data)
{
        u64 event_occurred, mask;
        unsigned int irq = data->irq & 0xff;

        event_occurred = HUB_L((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
        mask = event_occurred & SH_ALL_INT_MASK;
        HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
        __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

        irq_move_irq(data);
}

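/*
 * Retarget a device interrupt to a new nasid/slice.  Try the single
 * SAL_INTR_REDIRECT call first; if the PROM lacks support, fall back
 * to freeing the old PROM entry and allocating a new one, swapping
 * the sn_irq_info into the per-IRQ list under RCU.
 */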
struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
                                       nasid_t nasid, int slice)
{
        int vector;
        int cpuid;
#ifdef CONFIG_SMP
        int cpuphys;
#endif
        int64_t bridge;
        int local_widget, status;
        nasid_t local_nasid;
        struct sn_irq_info *new_irq_info;
        struct sn_pcibus_provider *pci_provider;

        bridge = (u64) sn_irq_info->irq_bridge;
        if (!bridge)
                return NULL; /* irq is not a device interrupt */

        local_nasid = NASID_GET(bridge);

        if (local_nasid & 1)
                local_widget = TIO_SWIN_WIDGETNUM(bridge);
        else
                local_widget = SWIN_WIDGETNUM(bridge);
        vector = sn_irq_info->irq_irq;

        /* Make use of SAL_INTR_REDIRECT if the PROM supports it */
        status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info,
                                  nasid, slice);
        if (!status) {
                new_irq_info = sn_irq_info;
                goto finish_up;
        }

        /*
         * PROM does not support SAL_INTR_REDIRECT, or it failed.
         * Revert to the old free-then-reallocate method.
         */
        new_irq_info = kmemdup(sn_irq_info, sizeof(struct sn_irq_info),
                               GFP_ATOMIC);
        if (new_irq_info == NULL)
                return NULL;

        /* Release the PROM resources tied to the old sn_irq_info */
        sn_intr_free(local_nasid, local_widget, new_irq_info);
        unregister_intr_pda(new_irq_info);

        /* Allocate a new PROM interrupt targeted at the new nasid/slice */
        status = sn_intr_alloc(local_nasid, local_widget,
                               new_irq_info, vector,
                               nasid, slice);

        /* SAL call failed */
        if (status) {
                kfree(new_irq_info);
                return NULL;
        }

        register_intr_pda(new_irq_info);
        spin_lock(&sn_irq_info_lock);
        list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
        spin_unlock(&sn_irq_info_lock);
        kfree_rcu(sn_irq_info, rcu);

finish_up:
        /* Update the kernel's new_irq_info with the new target info */
        cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
                                     new_irq_info->irq_slice);
        new_irq_info->irq_cpuid = cpuid;

        pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];

        /*
         * If this represents a line interrupt, target it.  If it's
         * an MSI (irq_int_bit < 0), it's already targeted.
         */
        if (new_irq_info->irq_int_bit >= 0 &&
            pci_provider && pci_provider->target_interrupt)
                (pci_provider->target_interrupt)(new_irq_info);

#ifdef CONFIG_SMP
        cpuphys = cpu_physical_id(cpuid);
        set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif

        return new_irq_info;
}

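/*
 * irq_set_affinity handler: retarget every sn_irq_info hanging off
 * this IRQ to the first CPU in the requested mask.
 */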
static int sn_set_affinity_irq(struct irq_data *data,
                               const struct cpumask *mask, bool force)
{
        struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
        unsigned int irq = data->irq;
        nasid_t nasid;
        int slice;

        nasid = cpuid_to_nasid(cpumask_first(mask));
        slice = cpuid_to_slice(cpumask_first(mask));

        list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
                                 sn_irq_lh[irq], list)
                (void)sn_retarget_vector(sn_irq_info, nasid, slice);

        return 0;
}

#ifdef CONFIG_SMP
void sn_set_err_irq_affinity(unsigned int irq)
{
        /*
         * On systems which support CPU disabling (SHub2), all error interrupts
         * are targeted at the boot CPU.
         */
        if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
                set_irq_affinity_info(irq, cpu_physical_id(0), 0);
}
#else
void sn_set_err_irq_affinity(unsigned int irq) { }
#endif

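/* Mask/unmask are intentional no-ops for SN hub interrupts. */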
static void
sn_mask_irq(struct irq_data *data)
{
}

static void
sn_unmask_irq(struct irq_data *data)
{
}

struct irq_chip irq_type_sn = {
        .name                   = "SN hub",
        .irq_startup            = sn_startup_irq,
        .irq_shutdown           = sn_shutdown_irq,
        .irq_enable             = sn_enable_irq,
        .irq_disable            = sn_disable_irq,
        .irq_ack                = sn_ack_irq,
        .irq_mask               = sn_mask_irq,
        .irq_unmask             = sn_unmask_irq,
        .irq_set_affinity       = sn_set_affinity_irq
};

ia64_vector sn_irq_to_vector(int irq)
{
        if (irq >= IA64_NUM_VECTORS)
                return 0;
        return (ia64_vector)irq;
}

unsigned int sn_local_vector_to_irq(u8 vector)
{
        return CPU_VECTOR_TO_IRQ(smp_processor_id(), vector);
}

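/*
 * Claim the SN2 device-vector range and install irq_type_sn on every
 * IRQ that does not already have a chip.
 */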
void sn_irq_init(void)
{
        int i;

        ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
        ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;

        for (i = 0; i < NR_IRQS; i++) {
                if (irq_get_chip(i) == &no_irq_chip)
                        irq_set_chip(i, &irq_type_sn);
        }
}

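/*
 * Track the lowest/highest registered device IRQ in the per-CPU data
 * so sn_lb_int_war_check() only has to scan that window.
 */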
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;

        if (pdacpu(cpu)->sn_last_irq < irq)
                pdacpu(cpu)->sn_last_irq = irq;

        if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
                pdacpu(cpu)->sn_first_irq = irq;
}

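/*
 * Shrink the per-CPU [sn_first_irq, sn_last_irq] window when the IRQ
 * being removed was one of its endpoints, rescanning the per-IRQ lists
 * for the nearest IRQ still targeted at this CPU.
 */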
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;
        struct sn_irq_info *tmp_irq_info;
        int i, foundmatch;

        rcu_read_lock();
        if (pdacpu(cpu)->sn_last_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_last_irq - 1;
                     i && !foundmatch; i--) {
                        list_for_each_entry_rcu(tmp_irq_info,
                                                sn_irq_lh[i],
                                                list) {
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch = 1;
                                        break;
                                }
                        }
                }
                pdacpu(cpu)->sn_last_irq = i;
        }

        if (pdacpu(cpu)->sn_first_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_first_irq + 1;
                     i < NR_IRQS && !foundmatch; i++) {
                        list_for_each_entry_rcu(tmp_irq_info,
                                                sn_irq_lh[i],
                                                list) {
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch = 1;
                                        break;
                                }
                        }
                }
                pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
        }
        rcu_read_unlock();
}

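/*
 * Bind a PROM-supplied interrupt to a PCI device: record the target
 * CPU, hook the sn_irq_info into the per-IRQ list, and publish the
 * PROM-chosen affinity to the IRQ core.
 */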
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
        nasid_t nasid = sn_irq_info->irq_nasid;
        int slice = sn_irq_info->irq_slice;
        int cpu = nasid_slice_to_cpuid(nasid, slice);
#ifdef CONFIG_SMP
        int cpuphys;
#endif

        pci_dev_get(pci_dev);
        sn_irq_info->irq_cpuid = cpu;
        sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);

        /* link it into the sn_irq_lh[irq] list */
        spin_lock(&sn_irq_info_lock);
        list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
        reserve_irq_vector(sn_irq_info->irq_irq);
        if (sn_irq_info->irq_int_bit != -1)
                irq_set_handler(sn_irq_info->irq_irq, handle_level_irq);
        spin_unlock(&sn_irq_info_lock);

        register_intr_pda(sn_irq_info);
#ifdef CONFIG_SMP
        cpuphys = cpu_physical_id(cpu);
        set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
        /*
         * Affinity was set by the PROM, prevent it from
         * being reset by the request_irq() path.
         */
        irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq));
#endif
}

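/*
 * Undo sn_irq_fixup(): unhook the device's sn_irq_info from the
 * per-IRQ list and drop the PCI device reference taken at fixup time.
 */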
void sn_irq_unfixup(struct pci_dev *pci_dev)
{
        struct sn_irq_info *sn_irq_info;

        /* Only clean up IRQ state if this device has a host bus context */
        if (!SN_PCIDEV_BUSSOFT(pci_dev))
                return;

        sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
        if (!sn_irq_info)
                return;
        if (!sn_irq_info->irq_irq) {
                kfree(sn_irq_info);
                return;
        }

        unregister_intr_pda(sn_irq_info);
        spin_lock(&sn_irq_info_lock);
        list_del_rcu(&sn_irq_info->list);
        spin_unlock(&sn_irq_info_lock);
        if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
                free_irq_vector(sn_irq_info->irq_irq);
        kfree_rcu(sn_irq_info, rcu);
        pci_dev_put(pci_dev);
}

static inline void
sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
{
        struct sn_pcibus_provider *pci_provider;

        pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];

        /* Don't force an interrupt if the irq has been disabled */
        if (!irqd_irq_disabled(irq_get_irq_data(sn_irq_info->irq_irq)) &&
            pci_provider && pci_provider->force_interrupt)
                (*pci_provider->force_interrupt)(sn_irq_info);
}

/*
 * Check for lost interrupts.  If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost.  Force an interrupt on that pin.  It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
        u64 regval;
        struct pcidev_info *pcidev_info;
        struct pcibus_info *pcibus_info;

        /*
         * Bridge types attached to TIO (anything but PIC) do not need this WAR
         * since they do not target Shub II interrupt registers.  If that
         * ever changes, this check will need updating.
         */
        if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
                return;

        pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
        if (!pcidev_info)
                return;

        pcibus_info =
            (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
            pdi_pcibus_info;
        regval = pcireg_intr_status_get(pcibus_info);

        if (!ia64_get_irr(irq_to_vector(irq))) {
                if (!test_bit(irq, pda->sn_in_service_ivecs)) {
                        regval &= 0xff;
                        if (sn_irq_info->irq_int_bit & regval &
                            sn_irq_info->irq_last_intr) {
                                regval &= ~(sn_irq_info->irq_int_bit & regval);
                                sn_call_force_intr_provider(sn_irq_info);
                        }
                }
        }
        sn_irq_info->irq_last_intr = regval;
}

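/*
 * Walk every registered device interrupt targeted at this CPU and
 * re-force any that appear lost (see sn_check_intr() above).
 */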
void sn_lb_int_war_check(void)
{
        struct sn_irq_info *sn_irq_info;
        int i;

        if (!sn_ioif_inited || pda->sn_first_irq == 0)
                return;

        rcu_read_lock();
        for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
                list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
                        sn_check_intr(i, sn_irq_info);
                }
        }
        rcu_read_unlock();
}

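/* Allocate and initialize the NR_IRQS per-IRQ list heads at boot. */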
void __init sn_irq_lh_init(void)
{
        int i;

        sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
        if (!sn_irq_lh)
                panic("SN PCI INIT: Failed to allocate memory for PCI init\n");

        for (i = 0; i < NR_IRQS; i++) {
                sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
                if (!sn_irq_lh[i])
                        panic("SN PCI INIT: Failed IRQ memory allocation\n");

                INIT_LIST_HEAD(sn_irq_lh[i]);
        }
}