linux/arch/blackfin/mach-common/ints-priority.c
   1/*
   2 * Set up the interrupt priorities
   3 *
   4 * Copyright  2004-2009 Analog Devices Inc.
   5 *                 2003 Bas Vermeulen <bas@buyways.nl>
   6 *                 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
   7 *            2000-2001 Lineo, Inc. D. Jeff Dionne <jeff@lineo.ca>
   8 *                 1999 D. Jeff Dionne <jeff@uclinux.org>
   9 *                 1996 Roman Zippel
  10 *
  11 * Licensed under the GPL-2
  12 */
  13
  14#include <linux/module.h>
  15#include <linux/kernel_stat.h>
  16#include <linux/seq_file.h>
  17#include <linux/irq.h>
  18#include <linux/sched.h>
  19#include <linux/sched/debug.h>
  20#include <linux/syscore_ops.h>
  21#include <linux/gpio.h>
  22#include <asm/delay.h>
  23#ifdef CONFIG_IPIPE
  24#include <linux/ipipe.h>
  25#endif
  26#include <asm/traps.h>
  27#include <asm/blackfin.h>
  28#include <asm/irq_handler.h>
  29#include <asm/dpmc.h>
  31
   32/*
   33 * NOTES:
   34 * - we have separated the physical hardware interrupts from the
   35 * levels that the Linux kernel sees (see the description in irq.h)
   37 */
  38
  39#ifndef CONFIG_SMP
  40/* Initialize this to an actual value to force it into the .data
  41 * section so that we know it is properly initialized at entry into
  42 * the kernel but before bss is initialized to zero (which is where
  43 * it would live otherwise).  The 0x1f magic represents the IRQs we
  44 * cannot actually mask out in hardware.
  45 */
  46unsigned long bfin_irq_flags = 0x1f;
  47EXPORT_SYMBOL(bfin_irq_flags);
  48#endif
  49
  50#ifdef CONFIG_PM
  51unsigned long bfin_sic_iwr[3];  /* Up to 3 SIC_IWRx registers */
  52unsigned vr_wakeup;
  53#endif
  54
  55#ifndef SEC_GCTL
  56static struct ivgx {
  57        /* irq number for request_irq, available in mach-bf5xx/irq.h */
  58        unsigned int irqno;
  59        /* corresponding bit in the SIC_ISR register */
  60        unsigned int isrflag;
  61} ivg_table[NR_PERI_INTS];
  62
  63static struct ivg_slice {
  64        /* position of first irq in ivg_table for given ivg */
  65        struct ivgx *ifirst;
  66        struct ivgx *istop;
  67} ivg7_13[IVG13 - IVG7 + 1];
  68
  69
  70/*
   72 * Search SIC_IAR and fill tables with the irq values
  72 * and their positions in the SIC_ISR register.
  73 */
  74static void __init search_IAR(void)
  75{
  76        unsigned ivg, irq_pos = 0;
  77        for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
  78                int irqN;
  79
  80                ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
  81
  82                for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
  83                        int irqn;
  84                        u32 iar =
  85                                bfin_read32((unsigned long *)SIC_IAR0 +
  86#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
  87        defined(CONFIG_BF538) || defined(CONFIG_BF539)
  88                                ((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
  89#else
  90                                (irqN >> 3)
  91#endif
  92                                );
  93                        for (irqn = irqN; irqn < irqN + 4; ++irqn) {
  94                                int iar_shift = (irqn & 7) * 4;
  95                                if (ivg == (0xf & (iar >> iar_shift))) {
  96                                        ivg_table[irq_pos].irqno = IVG7 + irqn;
  97                                        ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
  98                                        ivg7_13[ivg].istop++;
  99                                        irq_pos++;
 100                                }
 101                        }
 102                }
 103        }
 104}
 105#endif
 106
 107/*
 108 * This is for core internal IRQs
 109 */
 110void bfin_ack_noop(struct irq_data *d)
 111{
 112        /* Dummy function.  */
 113}
 114
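/*
 * Core interrupt sources are masked/unmasked by updating the cached IMASK
 * value (bfin_irq_flags); when interrupts are enabled the new value is
 * written to IMASK immediately via hard_local_irq_enable().
 */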
 115static void bfin_core_mask_irq(struct irq_data *d)
 116{
 117        bfin_irq_flags &= ~(1 << d->irq);
 118        if (!hard_irqs_disabled())
 119                hard_local_irq_enable();
 120}
 121
 122static void bfin_core_unmask_irq(struct irq_data *d)
 123{
 124        bfin_irq_flags |= 1 << d->irq;
 125        /*
 126         * If interrupts are enabled, IMASK must contain the same value
 127         * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
 128         * are currently disabled we need not do anything; one of the
 129         * callers will take care of setting IMASK to the proper value
 130         * when reenabling interrupts.
 131         * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
 132         * what we need.
 133         */
 134        if (!hard_irqs_disabled())
 135                hard_local_irq_enable();
 136        return;
 137}
 138
 139#ifndef SEC_GCTL
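/*
 * Peripheral (system) interrupts are masked in the SIC_IMASK register(s);
 * on parts with several banks the bank and bit are derived from the
 * system IRQ number (BFIN_SYSIRQ).
 */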
 140void bfin_internal_mask_irq(unsigned int irq)
 141{
 142        unsigned long flags = hard_local_irq_save();
 143#ifdef SIC_IMASK0
 144        unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
 145        unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
 146        bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
 147                        ~(1 << mask_bit));
 148# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
 149        bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
 150                        ~(1 << mask_bit));
 151# endif
 152#else
 153        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
 154                        ~(1 << BFIN_SYSIRQ(irq)));
 155#endif /* end of SIC_IMASK0 */
 156        hard_local_irq_restore(flags);
 157}
 158
 159static void bfin_internal_mask_irq_chip(struct irq_data *d)
 160{
 161        bfin_internal_mask_irq(d->irq);
 162}
 163
 164#ifdef CONFIG_SMP
 165void bfin_internal_unmask_irq_affinity(unsigned int irq,
 166                const struct cpumask *affinity)
 167#else
 168void bfin_internal_unmask_irq(unsigned int irq)
 169#endif
 170{
 171        unsigned long flags = hard_local_irq_save();
 172
 173#ifdef SIC_IMASK0
 174        unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
 175        unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
 176# ifdef CONFIG_SMP
 177        if (cpumask_test_cpu(0, affinity))
 178# endif
 179                bfin_write_SIC_IMASK(mask_bank,
 180                                bfin_read_SIC_IMASK(mask_bank) |
 181                                (1 << mask_bit));
 182# ifdef CONFIG_SMP
 183        if (cpumask_test_cpu(1, affinity))
 184                bfin_write_SICB_IMASK(mask_bank,
 185                                bfin_read_SICB_IMASK(mask_bank) |
 186                                (1 << mask_bit));
 187# endif
 188#else
 189        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
 190                        (1 << BFIN_SYSIRQ(irq)));
 191#endif
 192        hard_local_irq_restore(flags);
 193}
 194
 195#ifdef CONFIG_SMP
 196static void bfin_internal_unmask_irq_chip(struct irq_data *d)
 197{
 198        bfin_internal_unmask_irq_affinity(d->irq,
 199                                          irq_data_get_affinity_mask(d));
 200}
 201
 202static int bfin_internal_set_affinity(struct irq_data *d,
 203                                      const struct cpumask *mask, bool force)
 204{
 205        bfin_internal_mask_irq(d->irq);
 206        bfin_internal_unmask_irq_affinity(d->irq, mask);
 207
 208        return 0;
 209}
 210#else
 211static void bfin_internal_unmask_irq_chip(struct irq_data *d)
 212{
 213        bfin_internal_unmask_irq(d->irq);
 214}
 215#endif
 216
 217#if defined(CONFIG_PM)
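/*
 * Record which system IRQs may wake the part from sleep: set or clear the
 * corresponding bit in the cached SIC_IWR image (bfin_sic_iwr) and, for a
 * few special sources, the matching flag in vr_wakeup.
 */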
 218int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 219{
 220        u32 bank, bit, wakeup = 0;
 221        unsigned long flags;
 222        bank = BFIN_SYSIRQ(irq) / 32;
 223        bit = BFIN_SYSIRQ(irq) % 32;
 224
  225        switch (irq) {
  226#ifdef IRQ_RTC
  227        case IRQ_RTC:
  228                wakeup |= WAKE;
  229                break;
  230#endif
  231#ifdef IRQ_CAN0_RX
  232        case IRQ_CAN0_RX:
  233                wakeup |= CANWE;
  234                break;
  235#endif
  236#ifdef IRQ_CAN1_RX
  237        case IRQ_CAN1_RX:
  238                wakeup |= CANWE;
  239                break;
  240#endif
  241#ifdef IRQ_USB_INT0
  242        case IRQ_USB_INT0:
  243                wakeup |= USBWE;
  244                break;
  245#endif
  246#ifdef CONFIG_BF54x
  247        case IRQ_CNT:
  248                wakeup |= ROTWE;
  249                break;
  250#endif
  251        default:
  252                break;
  253        }
 254
 255        flags = hard_local_irq_save();
 256
 257        if (state) {
 258                bfin_sic_iwr[bank] |= (1 << bit);
 259                vr_wakeup  |= wakeup;
 260
 261        } else {
 262                bfin_sic_iwr[bank] &= ~(1 << bit);
 263                vr_wakeup  &= ~wakeup;
 264        }
 265
 266        hard_local_irq_restore(flags);
 267
 268        return 0;
 269}
 270
 271static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
 272{
 273        return bfin_internal_set_wake(d->irq, state);
 274}
 275#else
 276inline int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 277{
 278        return 0;
 279}
 280# define bfin_internal_set_wake_chip NULL
 281#endif
 282
 283#else /* SEC_GCTL */
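/*
 * Parts with a System Event Controller (SEC) instead of a SIC: each source
 * is identified by a system ID (SID); sources are acknowledged by writing
 * the SID to the SEC core interface and enabled/disabled through SEC_SCTL.
 */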
 284static void bfin_sec_preflow_handler(struct irq_data *d)
 285{
 286        unsigned long flags = hard_local_irq_save();
 287        unsigned int sid = BFIN_SYSIRQ(d->irq);
 288
 289        bfin_write_SEC_SCI(0, SEC_CSID, sid);
 290
 291        hard_local_irq_restore(flags);
 292}
 293
 294static void bfin_sec_mask_ack_irq(struct irq_data *d)
 295{
 296        unsigned long flags = hard_local_irq_save();
 297        unsigned int sid = BFIN_SYSIRQ(d->irq);
 298
 299        bfin_write_SEC_SCI(0, SEC_CSID, sid);
 300
 301        hard_local_irq_restore(flags);
 302}
 303
 304static void bfin_sec_unmask_irq(struct irq_data *d)
 305{
 306        unsigned long flags = hard_local_irq_save();
 307        unsigned int sid = BFIN_SYSIRQ(d->irq);
 308
 309        bfin_write32(SEC_END, sid);
 310
 311        hard_local_irq_restore(flags);
 312}
 313
 314static void bfin_sec_enable_ssi(unsigned int sid)
 315{
 316        unsigned long flags = hard_local_irq_save();
 317        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 318
 319        reg_sctl |= SEC_SCTL_SRC_EN;
 320        bfin_write_SEC_SCTL(sid, reg_sctl);
 321
 322        hard_local_irq_restore(flags);
 323}
 324
 325static void bfin_sec_disable_ssi(unsigned int sid)
 326{
 327        unsigned long flags = hard_local_irq_save();
 328        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 329
 330        reg_sctl &= ((uint32_t)~SEC_SCTL_SRC_EN);
 331        bfin_write_SEC_SCTL(sid, reg_sctl);
 332
 333        hard_local_irq_restore(flags);
 334}
 335
 336static void bfin_sec_set_ssi_coreid(unsigned int sid, unsigned int coreid)
 337{
 338        unsigned long flags = hard_local_irq_save();
 339        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 340
 341        reg_sctl &= ((uint32_t)~SEC_SCTL_CTG);
 342        bfin_write_SEC_SCTL(sid, reg_sctl | ((coreid << 20) & SEC_SCTL_CTG));
 343
 344        hard_local_irq_restore(flags);
 345}
 346
 347static void bfin_sec_enable_sci(unsigned int sid)
 348{
 349        unsigned long flags = hard_local_irq_save();
 350        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 351
 352        if (sid == BFIN_SYSIRQ(IRQ_WATCH0))
 353                reg_sctl |= SEC_SCTL_FAULT_EN;
 354        else
 355                reg_sctl |= SEC_SCTL_INT_EN;
 356        bfin_write_SEC_SCTL(sid, reg_sctl);
 357
 358        hard_local_irq_restore(flags);
 359}
 360
 361static void bfin_sec_disable_sci(unsigned int sid)
 362{
 363        unsigned long flags = hard_local_irq_save();
 364        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 365
 366        reg_sctl &= ((uint32_t)~SEC_SCTL_INT_EN);
 367        bfin_write_SEC_SCTL(sid, reg_sctl);
 368
 369        hard_local_irq_restore(flags);
 370}
 371
 372static void bfin_sec_enable(struct irq_data *d)
 373{
 374        unsigned long flags = hard_local_irq_save();
 375        unsigned int sid = BFIN_SYSIRQ(d->irq);
 376
 377        bfin_sec_enable_sci(sid);
 378        bfin_sec_enable_ssi(sid);
 379
 380        hard_local_irq_restore(flags);
 381}
 382
 383static void bfin_sec_disable(struct irq_data *d)
 384{
 385        unsigned long flags = hard_local_irq_save();
 386        unsigned int sid = BFIN_SYSIRQ(d->irq);
 387
 388        bfin_sec_disable_sci(sid);
 389        bfin_sec_disable_ssi(sid);
 390
 391        hard_local_irq_restore(flags);
 392}
 393
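/* Apply the core interrupt levels and the per-source priorities to the SEC. */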
 394static void bfin_sec_set_priority(unsigned int sec_int_levels, u8 *sec_int_priority)
 395{
 396        unsigned long flags = hard_local_irq_save();
 397        uint32_t reg_sctl;
 398        int i;
 399
 400        bfin_write_SEC_SCI(0, SEC_CPLVL, sec_int_levels);
 401
 402        for (i = 0; i < SYS_IRQS - BFIN_IRQ(0); i++) {
 403                reg_sctl = bfin_read_SEC_SCTL(i) & ~SEC_SCTL_PRIO;
 404                reg_sctl |= sec_int_priority[i] << SEC_SCTL_PRIO_OFFSET;
 405                bfin_write_SEC_SCTL(i, reg_sctl);
 406        }
 407
 408        hard_local_irq_restore(flags);
 409}
 410
 411void bfin_sec_raise_irq(unsigned int irq)
 412{
 413        unsigned long flags = hard_local_irq_save();
 414        unsigned int sid = BFIN_SYSIRQ(irq);
 415
 416        bfin_write32(SEC_RAISE, sid);
 417
 418        hard_local_irq_restore(flags);
 419}
 420
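/*
 * Set up the software-driven SEC sources: SIDs 34/36 are routed to core 0
 * and SIDs 35/37 to core 1; sources 35 and 37 are enabled here.
 */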
 421static void init_software_driven_irq(void)
 422{
 423        bfin_sec_set_ssi_coreid(34, 0);
 424        bfin_sec_set_ssi_coreid(35, 1);
 425
 426        bfin_sec_enable_sci(35);
 427        bfin_sec_enable_ssi(35);
 428        bfin_sec_set_ssi_coreid(36, 0);
 429        bfin_sec_set_ssi_coreid(37, 1);
 430        bfin_sec_enable_sci(37);
 431        bfin_sec_enable_ssi(37);
 432}
 433
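/*
 * SEC fault reporting: handle_sec_fault() decodes SEC_GSTAT and dispatches
 * to the SFI, SCI or SSI handler below according to the error code.
 */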
 434void handle_sec_sfi_fault(uint32_t gstat)
 435{
 436
 437}
 438
 439void handle_sec_sci_fault(uint32_t gstat)
 440{
 441        uint32_t core_id;
 442        uint32_t cstat;
 443
 444        core_id = gstat & SEC_GSTAT_SCI;
 445        cstat = bfin_read_SEC_SCI(core_id, SEC_CSTAT);
 446        if (cstat & SEC_CSTAT_ERR) {
 447                switch (cstat & SEC_CSTAT_ERRC) {
 448                case SEC_CSTAT_ACKERR:
 449                        printk(KERN_DEBUG "sec ack err\n");
 450                        break;
 451                default:
 452                        printk(KERN_DEBUG "sec sci unknown err\n");
 453                }
 454        }
 455
 456}
 457
 458void handle_sec_ssi_fault(uint32_t gstat)
 459{
 460        uint32_t sid;
 461        uint32_t sstat;
 462
 463        sid = gstat & SEC_GSTAT_SID;
 464        sstat = bfin_read_SEC_SSTAT(sid);
 465
 466}
 467
 468void handle_sec_fault(uint32_t sec_gstat)
 469{
 470        if (sec_gstat & SEC_GSTAT_ERR) {
 471
 472                switch (sec_gstat & SEC_GSTAT_ERRC) {
 473                case 0:
 474                        handle_sec_sfi_fault(sec_gstat);
 475                        break;
 476                case SEC_GSTAT_SCIERR:
 477                        handle_sec_sci_fault(sec_gstat);
 478                        break;
 479                case SEC_GSTAT_SSIERR:
 480                        handle_sec_ssi_fault(sec_gstat);
 481                        break;
 482                }
 483
 484
 485        }
 486}
 487
 488static struct irqaction bfin_fault_irq = {
 489        .name = "Blackfin fault",
 490};
 491
 492static irqreturn_t bfin_fault_routine(int irq, void *data)
 493{
 494        struct pt_regs *fp = get_irq_regs();
 495
 496        switch (irq) {
 497        case IRQ_C0_DBL_FAULT:
 498                double_fault_c(fp);
 499                break;
 500        case IRQ_C0_HW_ERR:
 501                dump_bfin_process(fp);
 502                dump_bfin_mem(fp);
 503                show_regs(fp);
 504                printk(KERN_NOTICE "Kernel Stack\n");
 505                show_stack(current, NULL);
 506                print_modules();
 507                panic("Core 0 hardware error");
 508                break;
 509        case IRQ_C0_NMI_L1_PARITY_ERR:
 510                panic("Core 0 NMI L1 parity error");
 511                break;
 512        case IRQ_SEC_ERR:
 513                pr_err("SEC error\n");
 514                handle_sec_fault(bfin_read32(SEC_GSTAT));
 515                break;
 516        default:
 517                panic("Unknown fault %d", irq);
 518        }
 519
 520        return IRQ_HANDLED;
 521}
 522#endif /* SEC_GCTL */
 523
 524static struct irq_chip bfin_core_irqchip = {
 525        .name = "CORE",
 526        .irq_mask = bfin_core_mask_irq,
 527        .irq_unmask = bfin_core_unmask_irq,
 528};
 529
 530#ifndef SEC_GCTL
 531static struct irq_chip bfin_internal_irqchip = {
 532        .name = "INTN",
 533        .irq_mask = bfin_internal_mask_irq_chip,
 534        .irq_unmask = bfin_internal_unmask_irq_chip,
 535        .irq_disable = bfin_internal_mask_irq_chip,
 536        .irq_enable = bfin_internal_unmask_irq_chip,
 537#ifdef CONFIG_SMP
 538        .irq_set_affinity = bfin_internal_set_affinity,
 539#endif
 540        .irq_set_wake = bfin_internal_set_wake_chip,
 541};
 542#else
 543static struct irq_chip bfin_sec_irqchip = {
 544        .name = "SEC",
 545        .irq_mask_ack = bfin_sec_mask_ack_irq,
 546        .irq_mask = bfin_sec_mask_ack_irq,
 547        .irq_unmask = bfin_sec_unmask_irq,
 548        .irq_eoi = bfin_sec_unmask_irq,
 549        .irq_disable = bfin_sec_disable,
 550        .irq_enable = bfin_sec_enable,
 551};
 552#endif
 553
 554void bfin_handle_irq(unsigned irq)
 555{
 556#ifdef CONFIG_IPIPE
 557        struct pt_regs regs;    /* Contents not used. */
 558        ipipe_trace_irq_entry(irq);
 559        __ipipe_handle_irq(irq, &regs);
 560        ipipe_trace_irq_exit(irq);
 561#else /* !CONFIG_IPIPE */
 562        generic_handle_irq(irq);
 563#endif  /* !CONFIG_IPIPE */
 564}
 565
 566#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
 567static int mac_stat_int_mask;
 568
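/*
 * Acknowledge a MAC status interrupt by clearing its sticky status bits;
 * apart from the MMC/RX/TX/wake sources handled explicitly, the EMAC_SYSTAT
 * bits are write-1-to-clear.
 */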
 569static void bfin_mac_status_ack_irq(unsigned int irq)
 570{
 571        switch (irq) {
 572        case IRQ_MAC_MMCINT:
 573                bfin_write_EMAC_MMC_TIRQS(
 574                        bfin_read_EMAC_MMC_TIRQE() &
 575                        bfin_read_EMAC_MMC_TIRQS());
 576                bfin_write_EMAC_MMC_RIRQS(
 577                        bfin_read_EMAC_MMC_RIRQE() &
 578                        bfin_read_EMAC_MMC_RIRQS());
 579                break;
 580        case IRQ_MAC_RXFSINT:
 581                bfin_write_EMAC_RX_STKY(
 582                        bfin_read_EMAC_RX_IRQE() &
 583                        bfin_read_EMAC_RX_STKY());
 584                break;
 585        case IRQ_MAC_TXFSINT:
 586                bfin_write_EMAC_TX_STKY(
 587                        bfin_read_EMAC_TX_IRQE() &
 588                        bfin_read_EMAC_TX_STKY());
 589                break;
 590        case IRQ_MAC_WAKEDET:
  591                bfin_write_EMAC_WKUP_CTL(
 592                        bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
 593                break;
 594        default:
 595                /* These bits are W1C */
 596                bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
 597                break;
 598        }
 599}
 600
 601static void bfin_mac_status_mask_irq(struct irq_data *d)
 602{
 603        unsigned int irq = d->irq;
 604
 605        mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
 606#ifdef BF537_FAMILY
 607        switch (irq) {
 608        case IRQ_MAC_PHYINT:
 609                bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
 610                break;
 611        default:
 612                break;
 613        }
 614#else
 615        if (!mac_stat_int_mask)
 616                bfin_internal_mask_irq(IRQ_MAC_ERROR);
 617#endif
 618        bfin_mac_status_ack_irq(irq);
 619}
 620
 621static void bfin_mac_status_unmask_irq(struct irq_data *d)
 622{
 623        unsigned int irq = d->irq;
 624
 625#ifdef BF537_FAMILY
 626        switch (irq) {
 627        case IRQ_MAC_PHYINT:
 628                bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
 629                break;
 630        default:
 631                break;
 632        }
 633#else
 634        if (!mac_stat_int_mask)
 635                bfin_internal_unmask_irq(IRQ_MAC_ERROR);
 636#endif
 637        mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
 638}
 639
 640#ifdef CONFIG_PM
 641int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
 642{
 643#ifdef BF537_FAMILY
 644        return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
 645#else
 646        return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
 647#endif
 648}
 649#else
 650# define bfin_mac_status_set_wake NULL
 651#endif
 652
 653static struct irq_chip bfin_mac_status_irqchip = {
 654        .name = "MACST",
 655        .irq_mask = bfin_mac_status_mask_irq,
 656        .irq_unmask = bfin_mac_status_unmask_irq,
 657        .irq_set_wake = bfin_mac_status_set_wake,
 658};
 659
 660void bfin_demux_mac_status_irq(struct irq_desc *inta_desc)
 661{
 662        int i, irq = 0;
 663        u32 status = bfin_read_EMAC_SYSTAT();
 664
 665        for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
 666                if (status & (1L << i)) {
 667                        irq = IRQ_MAC_PHYINT + i;
 668                        break;
 669                }
 670
 671        if (irq) {
 672                if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
 673                        bfin_handle_irq(irq);
 674                } else {
 675                        bfin_mac_status_ack_irq(irq);
 676                        pr_debug("IRQ %d:"
 677                                        " MASKED MAC ERROR INTERRUPT ASSERTED\n",
 678                                        irq);
 679                }
 680        } else
 681                printk(KERN_ERR
 682                                "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
 683                                " INTERRUPT ASSERTED BUT NO SOURCE FOUND"
  684                                " (EMAC_SYSTAT=0x%X)\n",
 685                                __func__, __FILE__, __LINE__, status);
 686}
 687#endif
 688
 689static inline void bfin_set_irq_handler(struct irq_data *d, irq_flow_handler_t handle)
 690{
 691#ifdef CONFIG_IPIPE
 692        handle = handle_level_irq;
 693#endif
 694        irq_set_handler_locked(d, handle);
 695}
 696
 697#ifdef CONFIG_GPIO_ADI
 698
 699static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
 700
 701static void bfin_gpio_ack_irq(struct irq_data *d)
 702{
  703        /* AFAIK, when mask_ack is provided, ack_irq only gets
  704         * called for edge-sense irqs
  705         */
 706        set_gpio_data(irq_to_gpio(d->irq), 0);
 707}
 708
 709static void bfin_gpio_mask_ack_irq(struct irq_data *d)
 710{
 711        unsigned int irq = d->irq;
 712        u32 gpionr = irq_to_gpio(irq);
 713
 714        if (!irqd_is_level_type(d))
 715                set_gpio_data(gpionr, 0);
 716
 717        set_gpio_maska(gpionr, 0);
 718}
 719
 720static void bfin_gpio_mask_irq(struct irq_data *d)
 721{
 722        set_gpio_maska(irq_to_gpio(d->irq), 0);
 723}
 724
 725static void bfin_gpio_unmask_irq(struct irq_data *d)
 726{
 727        set_gpio_maska(irq_to_gpio(d->irq), 1);
 728}
 729
 730static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
 731{
 732        u32 gpionr = irq_to_gpio(d->irq);
 733
 734        if (__test_and_set_bit(gpionr, gpio_enabled))
 735                bfin_gpio_irq_prepare(gpionr);
 736
 737        bfin_gpio_unmask_irq(d);
 738
 739        return 0;
 740}
 741
 742static void bfin_gpio_irq_shutdown(struct irq_data *d)
 743{
 744        u32 gpionr = irq_to_gpio(d->irq);
 745
 746        bfin_gpio_mask_irq(d);
 747        __clear_bit(gpionr, gpio_enabled);
 748        bfin_gpio_irq_free(gpionr);
 749}
 750
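/*
 * Program the GPIO trigger registers (polarity, edge/level, both-edges) to
 * match the requested IRQ type and pick the edge or level flow handler.
 */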
 751static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
 752{
 753        unsigned int irq = d->irq;
 754        int ret;
 755        char buf[16];
 756        u32 gpionr = irq_to_gpio(irq);
 757
 758        if (type == IRQ_TYPE_PROBE) {
 759                /* only probe unenabled GPIO interrupt lines */
 760                if (test_bit(gpionr, gpio_enabled))
 761                        return 0;
 762                type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
 763        }
 764
 765        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
 766                    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
 767
 768                snprintf(buf, 16, "gpio-irq%d", irq);
 769                ret = bfin_gpio_irq_request(gpionr, buf);
 770                if (ret)
 771                        return ret;
 772
 773                if (__test_and_set_bit(gpionr, gpio_enabled))
 774                        bfin_gpio_irq_prepare(gpionr);
 775
 776        } else {
 777                __clear_bit(gpionr, gpio_enabled);
 778                return 0;
 779        }
 780
 781        set_gpio_inen(gpionr, 0);
 782        set_gpio_dir(gpionr, 0);
 783
 784        if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
 785            == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
 786                set_gpio_both(gpionr, 1);
 787        else
 788                set_gpio_both(gpionr, 0);
 789
 790        if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
 791                set_gpio_polar(gpionr, 1);      /* low or falling edge denoted by one */
 792        else
 793                set_gpio_polar(gpionr, 0);      /* high or rising edge denoted by zero */
 794
 795        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
 796                set_gpio_edge(gpionr, 1);
 797                set_gpio_inen(gpionr, 1);
 798                set_gpio_data(gpionr, 0);
 799
 800        } else {
 801                set_gpio_edge(gpionr, 0);
 802                set_gpio_inen(gpionr, 1);
 803        }
 804
 805        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
 806                bfin_set_irq_handler(d, handle_edge_irq);
 807        else
 808                bfin_set_irq_handler(d, handle_level_irq);
 809
 810        return 0;
 811}
 812
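/*
 * Walk the pending-and-unmasked bits of one GPIO port and hand every
 * asserted line to bfin_handle_irq().
 */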
 813static void bfin_demux_gpio_block(unsigned int irq)
 814{
 815        unsigned int gpio, mask;
 816
 817        gpio = irq_to_gpio(irq);
 818        mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
 819
 820        while (mask) {
 821                if (mask & 1)
 822                        bfin_handle_irq(irq);
 823                irq++;
 824                mask >>= 1;
 825        }
 826}
 827
 828void bfin_demux_gpio_irq(struct irq_desc *desc)
 829{
 830        unsigned int inta_irq = irq_desc_get_irq(desc);
 831        unsigned int irq;
 832
 833        switch (inta_irq) {
 834#if defined(BF537_FAMILY)
 835        case IRQ_PF_INTA_PG_INTA:
 836                bfin_demux_gpio_block(IRQ_PF0);
 837                irq = IRQ_PG0;
 838                break;
 839        case IRQ_PH_INTA_MAC_RX:
 840                irq = IRQ_PH0;
 841                break;
 842#elif defined(BF533_FAMILY)
 843        case IRQ_PROG_INTA:
 844                irq = IRQ_PF0;
 845                break;
 846#elif defined(BF538_FAMILY)
 847        case IRQ_PORTF_INTA:
 848                irq = IRQ_PF0;
 849                break;
 850#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
 851        case IRQ_PORTF_INTA:
 852                irq = IRQ_PF0;
 853                break;
 854        case IRQ_PORTG_INTA:
 855                irq = IRQ_PG0;
 856                break;
 857        case IRQ_PORTH_INTA:
 858                irq = IRQ_PH0;
 859                break;
 860#elif defined(CONFIG_BF561)
 861        case IRQ_PROG0_INTA:
 862                irq = IRQ_PF0;
 863                break;
 864        case IRQ_PROG1_INTA:
 865                irq = IRQ_PF16;
 866                break;
 867        case IRQ_PROG2_INTA:
 868                irq = IRQ_PF32;
 869                break;
 870#endif
 871        default:
 872                BUG();
 873                return;
 874        }
 875
 876        bfin_demux_gpio_block(irq);
 877}
 878
 879#ifdef CONFIG_PM
 880
 881static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
 882{
 883        return bfin_gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
 884}
 885
 886#else
 887
 888# define bfin_gpio_set_wake NULL
 889
 890#endif
 891
 892static struct irq_chip bfin_gpio_irqchip = {
 893        .name = "GPIO",
 894        .irq_ack = bfin_gpio_ack_irq,
 895        .irq_mask = bfin_gpio_mask_irq,
 896        .irq_mask_ack = bfin_gpio_mask_ack_irq,
 897        .irq_unmask = bfin_gpio_unmask_irq,
 898        .irq_disable = bfin_gpio_mask_irq,
 899        .irq_enable = bfin_gpio_unmask_irq,
 900        .irq_set_type = bfin_gpio_irq_type,
 901        .irq_startup = bfin_gpio_irq_startup,
 902        .irq_shutdown = bfin_gpio_irq_shutdown,
 903        .irq_set_wake = bfin_gpio_set_wake,
 904};
 905
 906#endif
 907
 908#ifdef CONFIG_PM
 909
 910#ifdef SEC_GCTL
 911static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS];
 912
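/*
 * Save and restore the per-PINT SEC_SCTL configuration across suspend;
 * resume also resets and re-enables the SEC core interface.
 */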
 913static int sec_suspend(void)
 914{
 915        u32 bank;
 916
 917        for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
 918                save_pint_sec_ctl[bank] = bfin_read_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0));
 919        return 0;
 920}
 921
 922static void sec_resume(void)
 923{
 924        u32 bank;
 925
 926        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
 927        udelay(100);
 928        bfin_write_SEC_GCTL(SEC_GCTL_EN);
 929        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
 930
 931        for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
 932                bfin_write_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0), save_pint_sec_ctl[bank]);
 933}
 934
 935static struct syscore_ops sec_pm_syscore_ops = {
 936        .suspend = sec_suspend,
 937        .resume = sec_resume,
 938};
 939#endif
 940
 941#endif
 942
 943void init_exception_vectors(void)
 944{
 945        /* cannot program in software:
 946         * evt0 - emulation (jtag)
 947         * evt1 - reset
 948         */
 949        bfin_write_EVT2(evt_nmi);
 950        bfin_write_EVT3(trap);
 951        bfin_write_EVT5(evt_ivhw);
 952        bfin_write_EVT6(evt_timer);
 953        bfin_write_EVT7(evt_evt7);
 954        bfin_write_EVT8(evt_evt8);
 955        bfin_write_EVT9(evt_evt9);
 956        bfin_write_EVT10(evt_evt10);
 957        bfin_write_EVT11(evt_evt11);
 958        bfin_write_EVT12(evt_evt12);
 959        bfin_write_EVT13(evt_evt13);
 960        bfin_write_EVT14(evt_evt14);
 961        bfin_write_EVT15(evt_system_call);
 962        CSYNC();
 963}
 964
 965#ifndef SEC_GCTL
 966/*
 967 * This function should be called during kernel startup to initialize
 968 * the BFin IRQ handling routines.
 969 */
 970
 971int __init init_arch_irq(void)
 972{
 973        int irq;
 974        unsigned long ilat = 0;
 975
 976        /*  Disable all the peripheral intrs  - page 4-29 HW Ref manual */
 977#ifdef SIC_IMASK0
 978        bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
 979        bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
 980# ifdef SIC_IMASK2
 981        bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
 982# endif
 983# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
 984        bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
 985        bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
 986# endif
 987#else
 988        bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
 989#endif
 990
 991        local_irq_disable();
 992
 993        for (irq = 0; irq <= SYS_IRQS; irq++) {
 994                if (irq <= IRQ_CORETMR)
 995                        irq_set_chip(irq, &bfin_core_irqchip);
 996                else
 997                        irq_set_chip(irq, &bfin_internal_irqchip);
 998
 999                switch (irq) {
1000#if !BFIN_GPIO_PINT
1001#if defined(BF537_FAMILY)
1002                case IRQ_PH_INTA_MAC_RX:
1003                case IRQ_PF_INTA_PG_INTA:
1004#elif defined(BF533_FAMILY)
1005                case IRQ_PROG_INTA:
1006#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
1007                case IRQ_PORTF_INTA:
1008                case IRQ_PORTG_INTA:
1009                case IRQ_PORTH_INTA:
1010#elif defined(CONFIG_BF561)
1011                case IRQ_PROG0_INTA:
1012                case IRQ_PROG1_INTA:
1013                case IRQ_PROG2_INTA:
1014#elif defined(BF538_FAMILY)
1015                case IRQ_PORTF_INTA:
1016#endif
1017                        irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1018                        break;
1019#endif
1020#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1021                case IRQ_MAC_ERROR:
1022                        irq_set_chained_handler(irq,
1023                                                bfin_demux_mac_status_irq);
1024                        break;
1025#endif
1026#if defined(CONFIG_SMP) || defined(CONFIG_ICC)
1027                case IRQ_SUPPLE_0:
1028                case IRQ_SUPPLE_1:
1029                        irq_set_handler(irq, handle_percpu_irq);
1030                        break;
1031#endif
1032
1033#ifdef CONFIG_TICKSOURCE_CORETMR
1034                case IRQ_CORETMR:
1035# ifdef CONFIG_SMP
1036                        irq_set_handler(irq, handle_percpu_irq);
1037# else
1038                        irq_set_handler(irq, handle_simple_irq);
1039# endif
1040                        break;
1041#endif
1042
1043#ifdef CONFIG_TICKSOURCE_GPTMR0
1044                case IRQ_TIMER0:
1045                        irq_set_handler(irq, handle_simple_irq);
1046                        break;
1047#endif
1048
1049                default:
1050#ifdef CONFIG_IPIPE
1051                        irq_set_handler(irq, handle_level_irq);
1052#else
1053                        irq_set_handler(irq, handle_simple_irq);
1054#endif
1055                        break;
1056                }
1057        }
1058
1059        init_mach_irq();
1060
1061#if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
1062        for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
1063                irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
1064                                         handle_level_irq);
1065#endif
 1066        /* if configured as edge, the handler is switched to handle_edge_irq */
1067#ifdef CONFIG_GPIO_ADI
1068        for (irq = GPIO_IRQ_BASE;
1069                irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1070                irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1071                                         handle_level_irq);
1072#endif
1073        bfin_write_IMASK(0);
1074        CSYNC();
1075        ilat = bfin_read_ILAT();
1076        CSYNC();
1077        bfin_write_ILAT(ilat);
1078        CSYNC();
1079
1080        printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
 1081        /* Setting IMASK=xxx is equivalent to "STI xxx", i.e. to setting
 1082         * bfin_irq_flags = xxx and calling local_irq_enable().
 1083         */
1084        program_IAR();
 1085        /* Therefore it is better to set up the IARs before interrupts are enabled. */
1086        search_IAR();
1087
1088        /* Enable interrupts IVG7-15 */
1089        bfin_irq_flags |= IMASK_IVG15 |
1090                IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1091                IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1092
1093
 1094        /* This implicitly covers ANOMALY_05000171:
 1095         * Boot-ROM code modifies the SICA_IWRx wakeup registers.
 1096         */
1097#ifdef SIC_IWR0
1098        bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
1099# ifdef SIC_IWR1
1100        /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
1101         * will screw up the bootrom as it relies on MDMA0/1 waking it
1102         * up from IDLE instructions.  See this report for more info:
1103         * http://blackfin.uclinux.org/gf/tracker/4323
1104         */
1105        if (ANOMALY_05000435)
1106                bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
1107        else
1108                bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
1109# endif
1110# ifdef SIC_IWR2
1111        bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
1112# endif
1113#else
1114        bfin_write_SIC_IWR(IWR_DISABLE_ALL);
1115#endif
1116        return 0;
1117}
1118
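/*
 * Translate a core event vector (IVG7..IVG13) into the system IRQ that
 * raised it by scanning the pending-and-unmasked SIC status bits belonging
 * to that priority level.
 */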
1119#ifdef CONFIG_DO_IRQ_L1
1120__attribute__((l1_text))
1121#endif
1122static int vec_to_irq(int vec)
1123{
1124        struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1125        struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1126        unsigned long sic_status[3];
1127        if (likely(vec == EVT_IVTMR_P))
1128                return IRQ_CORETMR;
1129#ifdef SIC_ISR
1130        sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1131#else
1132        if (smp_processor_id()) {
1133# ifdef SICB_ISR0
1134                /* This will be optimized out in UP mode. */
1135                sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1136                sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1137# endif
1138        } else {
1139                sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1140                sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1141        }
1142#endif
1143#ifdef SIC_ISR2
1144        sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1145#endif
1146
1147        for (;; ivg++) {
1148                if (ivg >= ivg_stop)
1149                        return -1;
1150#ifdef SIC_ISR
1151                if (sic_status[0] & ivg->isrflag)
1152#else
1153                if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1154#endif
1155                        return ivg->irqno;
1156        }
1157}
1158
1159#else /* SEC_GCTL */
1160
1161/*
1162 * This function should be called during kernel startup to initialize
1163 * the BFin IRQ handling routines.
1164 */
1165
1166int __init init_arch_irq(void)
1167{
1168        int irq;
1169        unsigned long ilat = 0;
1170
1171        bfin_write_SEC_GCTL(SEC_GCTL_RESET);
1172
1173        local_irq_disable();
1174
1175        for (irq = 0; irq <= SYS_IRQS; irq++) {
1176                if (irq <= IRQ_CORETMR) {
1177                        irq_set_chip_and_handler(irq, &bfin_core_irqchip,
1178                                handle_simple_irq);
1179#if defined(CONFIG_TICKSOURCE_CORETMR) && defined(CONFIG_SMP)
1180                        if (irq == IRQ_CORETMR)
1181                                irq_set_handler(irq, handle_percpu_irq);
1182#endif
1183                } else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
1184                        irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
1185                                handle_percpu_irq);
1186                } else {
1187                        irq_set_chip(irq, &bfin_sec_irqchip);
1188                        irq_set_handler(irq, handle_fasteoi_irq);
1189                        __irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
1190                }
1191        }
1192
1193        bfin_write_IMASK(0);
1194        CSYNC();
1195        ilat = bfin_read_ILAT();
1196        CSYNC();
1197        bfin_write_ILAT(ilat);
1198        CSYNC();
1199
1200        printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
1201
1202        bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
1203
1204        /* Enable interrupts IVG7-15 */
1205        bfin_irq_flags |= IMASK_IVG15 |
1206            IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1207            IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1208
1209
1210        bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
1211        bfin_sec_enable_sci(BFIN_SYSIRQ(IRQ_WATCH0));
1212        bfin_sec_enable_ssi(BFIN_SYSIRQ(IRQ_WATCH0));
1213        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
1214        udelay(100);
1215        bfin_write_SEC_GCTL(SEC_GCTL_EN);
1216        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1217        bfin_write_SEC_SCI(1, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1218
1219        init_software_driven_irq();
1220
1221#ifdef CONFIG_PM
1222        register_syscore_ops(&sec_pm_syscore_ops);
1223#endif
1224
1225        bfin_fault_irq.handler = bfin_fault_routine;
1226#ifdef CONFIG_L1_PARITY_CHECK
1227        setup_irq(IRQ_C0_NMI_L1_PARITY_ERR, &bfin_fault_irq);
1228#endif
1229        setup_irq(IRQ_C0_DBL_FAULT, &bfin_fault_irq);
1230        setup_irq(IRQ_SEC_ERR, &bfin_fault_irq);
1231
1232        return 0;
1233}
1234
1235#ifdef CONFIG_DO_IRQ_L1
1236__attribute__((l1_text))
1237#endif
1238static int vec_to_irq(int vec)
1239{
1240        if (likely(vec == EVT_IVTMR_P))
1241                return IRQ_CORETMR;
1242
1243        return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID));
1244}
1245#endif  /* SEC_GCTL */
1246
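/* Common interrupt entry: map the event vector to an IRQ and dispatch it. */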
1247#ifdef CONFIG_DO_IRQ_L1
1248__attribute__((l1_text))
1249#endif
1250void do_irq(int vec, struct pt_regs *fp)
1251{
1252        int irq = vec_to_irq(vec);
1253        if (irq == -1)
1254                return;
1255        asm_do_IRQ(irq, fp);
1256}
1257
1258#ifdef CONFIG_IPIPE
1259
1260int __ipipe_get_irq_priority(unsigned irq)
1261{
1262        int ient, prio;
1263
1264        if (irq <= IRQ_CORETMR)
1265                return irq;
1266
1267#ifdef SEC_GCTL
1268        if (irq >= BFIN_IRQ(0))
1269                return IVG11;
1270#else
1271        for (ient = 0; ient < NR_PERI_INTS; ient++) {
1272                struct ivgx *ivg = ivg_table + ient;
1273                if (ivg->irqno == irq) {
1274                        for (prio = 0; prio <= IVG13-IVG7; prio++) {
1275                                if (ivg7_13[prio].ifirst <= ivg &&
1276                                    ivg7_13[prio].istop > ivg)
1277                                        return IVG7 + prio;
1278                        }
1279                }
1280        }
1281#endif
1282
1283        return IVG15;
1284}
1285
1286/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
1287#ifdef CONFIG_DO_IRQ_L1
1288__attribute__((l1_text))
1289#endif
1290asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1291{
1292        struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
1293        struct ipipe_domain *this_domain = __ipipe_current_domain;
1294        int irq, s = 0;
1295
1296        irq = vec_to_irq(vec);
1297        if (irq == -1)
1298                return 0;
1299
1300        if (irq == IRQ_SYSTMR) {
1301#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
1302                bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
1303#endif
1304                /* This is basically what we need from the register frame. */
1305                __this_cpu_write(__ipipe_tick_regs.ipend, regs->ipend);
1306                __this_cpu_write(__ipipe_tick_regs.pc, regs->pc);
1307                if (this_domain != ipipe_root_domain)
1308                        __this_cpu_and(__ipipe_tick_regs.ipend, ~0x10);
1309                else
1310                        __this_cpu_or(__ipipe_tick_regs.ipend, 0x10);
1311        }
1312
1313        /*
1314         * We don't want Linux interrupt handlers to run at the
1315         * current core priority level (i.e. < EVT15), since this
1316         * might delay other interrupts handled by a high priority
1317         * domain. Here is what we do instead:
1318         *
1319         * - we raise the SYNCDEFER bit to prevent
 1320         * __ipipe_handle_irq() from syncing the pipeline for the root
1321         * stage for the incoming interrupt. Upon return, that IRQ is
1322         * pending in the interrupt log.
1323         *
1324         * - we raise the TIF_IRQ_SYNC bit for the current thread, so
1325         * that _schedule_and_signal_from_int will eventually sync the
1326         * pipeline from EVT15.
1327         */
1328        if (this_domain == ipipe_root_domain) {
1329                s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1330                barrier();
1331        }
1332
1333        ipipe_trace_irq_entry(irq);
1334        __ipipe_handle_irq(irq, regs);
1335        ipipe_trace_irq_exit(irq);
1336
1337        if (user_mode(regs) &&
1338            !ipipe_test_foreign_stack() &&
1339            (current->ipipe_flags & PF_EVTRET) != 0) {
1340                /*
1341                 * Testing for user_regs() does NOT fully eliminate
1342                 * foreign stack contexts, because of the forged
1343                 * interrupt returns we do through
1344                 * __ipipe_call_irqtail. In that case, we might have
1345                 * preempted a foreign stack context in a high
1346                 * priority domain, with a single interrupt level now
1347                 * pending after the irqtail unwinding is done. In
1348                 * which case user_mode() is now true, and the event
1349                 * gets dispatched spuriously.
1350                 */
1351                current->ipipe_flags &= ~PF_EVTRET;
1352                __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
1353        }
1354
1355        if (this_domain == ipipe_root_domain) {
1356                set_thread_flag(TIF_IRQ_SYNC);
1357                if (!s) {
1358                        __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1359                        return !test_bit(IPIPE_STALL_FLAG, &p->status);
1360                }
1361        }
1362
1363        return 0;
1364}
1365
1366#endif /* CONFIG_IPIPE */
1367