linux/arch/blackfin/mach-common/ints-priority.c
   1/*
   2 * Set up the interrupt priorities
   3 *
   4 * Copyright  2004-2009 Analog Devices Inc.
   5 *                 2003 Bas Vermeulen <bas@buyways.nl>
   6 *                 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
   7 *            2000-2001 Lineo, Inc. D. Jeff Dionne <jeff@lineo.ca>
   8 *                 1999 D. Jeff Dionne <jeff@uclinux.org>
   9 *                 1996 Roman Zippel
  10 *
  11 * Licensed under the GPL-2
  12 */
  13
  14#include <linux/module.h>
  15#include <linux/kernel_stat.h>
  16#include <linux/seq_file.h>
  17#include <linux/irq.h>
  18#include <linux/sched.h>
  19#include <linux/syscore_ops.h>
  20#include <asm/delay.h>
  21#ifdef CONFIG_IPIPE
  22#include <linux/ipipe.h>
  23#endif
  24#include <asm/traps.h>
  25#include <asm/blackfin.h>
  26#include <asm/gpio.h>
  27#include <asm/irq_handler.h>
  28#include <asm/dpmc.h>
  30
   31/*
   32 * NOTES:
   33 * - we have separated the physical hardware interrupts from the
   34 *   levels that the Linux kernel sees (see the description in
   35 *   irq.h)
   36 */
  37
  38#ifndef CONFIG_SMP
  39/* Initialize this to an actual value to force it into the .data
  40 * section so that we know it is properly initialized at entry into
  41 * the kernel but before bss is initialized to zero (which is where
  42 * it would live otherwise).  The 0x1f magic represents the IRQs we
  43 * cannot actually mask out in hardware.
  44 */
  45unsigned long bfin_irq_flags = 0x1f;
  46EXPORT_SYMBOL(bfin_irq_flags);
  47#endif
  48
  49#ifdef CONFIG_PM
  50unsigned long bfin_sic_iwr[3];  /* Up to 3 SIC_IWRx registers */
  51unsigned vr_wakeup;
  52#endif
  53
  54#ifndef SEC_GCTL
  55static struct ivgx {
  56        /* irq number for request_irq, available in mach-bf5xx/irq.h */
  57        unsigned int irqno;
  58        /* corresponding bit in the SIC_ISR register */
  59        unsigned int isrflag;
  60} ivg_table[NR_PERI_INTS];
  61
  62static struct ivg_slice {
  63        /* position of first irq in ivg_table for given ivg */
  64        struct ivgx *ifirst;
  65        struct ivgx *istop;
  66} ivg7_13[IVG13 - IVG7 + 1];
  67
  68
  69/*
   70 * Search SIC_IAR and fill the tables with the irq values
  71 * and their positions in the SIC_ISR register.
  72 */
  73static void __init search_IAR(void)
  74{
  75        unsigned ivg, irq_pos = 0;
  76        for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
  77                int irqN;
  78
  79                ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
  80
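                      /*
                       * Each 32-bit SIC_IARx register packs eight 4-bit IVG
                       * assignments, one nibble per peripheral interrupt:
                       * "irqN >> 3" (or the banked #if variant below) selects
                       * the IAR word and "(irqn & 7) * 4" selects the nibble.
                       */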
  81                for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
  82                        int irqn;
  83                        u32 iar =
  84                                bfin_read32((unsigned long *)SIC_IAR0 +
  85#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
  86        defined(CONFIG_BF538) || defined(CONFIG_BF539)
  87                                ((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
  88#else
  89                                (irqN >> 3)
  90#endif
  91                                );
  92                        for (irqn = irqN; irqn < irqN + 4; ++irqn) {
  93                                int iar_shift = (irqn & 7) * 4;
  94                                if (ivg == (0xf & (iar >> iar_shift))) {
  95                                        ivg_table[irq_pos].irqno = IVG7 + irqn;
  96                                        ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
  97                                        ivg7_13[ivg].istop++;
  98                                        irq_pos++;
  99                                }
 100                        }
 101                }
 102        }
 103}
 104#endif
 105
 106/*
 107 * This is for core internal IRQs
 108 */
 109void bfin_ack_noop(struct irq_data *d)
 110{
 111        /* Dummy function.  */
 112}
 113
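      /*
       * The core IMASK itself is not written here: the desired mask is
       * kept in the cached bfin_irq_flags, and hard_local_irq_enable()
       * (effectively "STI bfin_irq_flags") makes the hardware register
       * match it whenever interrupts are not hard-disabled.
       */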
 114static void bfin_core_mask_irq(struct irq_data *d)
 115{
 116        bfin_irq_flags &= ~(1 << d->irq);
 117        if (!hard_irqs_disabled())
 118                hard_local_irq_enable();
 119}
 120
 121static void bfin_core_unmask_irq(struct irq_data *d)
 122{
 123        bfin_irq_flags |= 1 << d->irq;
 124        /*
 125         * If interrupts are enabled, IMASK must contain the same value
 126         * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
 127         * are currently disabled we need not do anything; one of the
 128         * callers will take care of setting IMASK to the proper value
 129         * when reenabling interrupts.
 130         * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
 131         * what we need.
 132         */
 133        if (!hard_irqs_disabled())
 134                hard_local_irq_enable();
 135        return;
 136}
 137
 138#ifndef SEC_GCTL
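      /*
       * System (peripheral) interrupts are masked in the SIC.  On parts
       * with banked controllers (SIC_IMASK0/1/2) each 32-bit register
       * covers 32 system IRQs, so the bank and bit below are derived
       * from BFIN_SYSIRQ(irq).
       */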
 139void bfin_internal_mask_irq(unsigned int irq)
 140{
 141        unsigned long flags = hard_local_irq_save();
 142#ifdef SIC_IMASK0
 143        unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
 144        unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
 145        bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
 146                        ~(1 << mask_bit));
 147# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
 148        bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
 149                        ~(1 << mask_bit));
 150# endif
 151#else
 152        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
 153                        ~(1 << BFIN_SYSIRQ(irq)));
 154#endif /* end of SIC_IMASK0 */
 155        hard_local_irq_restore(flags);
 156}
 157
 158static void bfin_internal_mask_irq_chip(struct irq_data *d)
 159{
 160        bfin_internal_mask_irq(d->irq);
 161}
 162
 163#ifdef CONFIG_SMP
 164void bfin_internal_unmask_irq_affinity(unsigned int irq,
 165                const struct cpumask *affinity)
 166#else
 167void bfin_internal_unmask_irq(unsigned int irq)
 168#endif
 169{
 170        unsigned long flags = hard_local_irq_save();
 171
 172#ifdef SIC_IMASK0
 173        unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
 174        unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
 175# ifdef CONFIG_SMP
 176        if (cpumask_test_cpu(0, affinity))
 177# endif
 178                bfin_write_SIC_IMASK(mask_bank,
 179                                bfin_read_SIC_IMASK(mask_bank) |
 180                                (1 << mask_bit));
 181# ifdef CONFIG_SMP
 182        if (cpumask_test_cpu(1, affinity))
 183                bfin_write_SICB_IMASK(mask_bank,
 184                                bfin_read_SICB_IMASK(mask_bank) |
 185                                (1 << mask_bit));
 186# endif
 187#else
 188        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
 189                        (1 << BFIN_SYSIRQ(irq)));
 190#endif
 191        hard_local_irq_restore(flags);
 192}
 193
 194#ifdef CONFIG_SMP
 195static void bfin_internal_unmask_irq_chip(struct irq_data *d)
 196{
 197        bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
 198}
 199
 200static int bfin_internal_set_affinity(struct irq_data *d,
 201                                      const struct cpumask *mask, bool force)
 202{
 203        bfin_internal_mask_irq(d->irq);
 204        bfin_internal_unmask_irq_affinity(d->irq, mask);
 205
 206        return 0;
 207}
 208#else
 209static void bfin_internal_unmask_irq_chip(struct irq_data *d)
 210{
 211        bfin_internal_unmask_irq(d->irq);
 212}
 213#endif
 214
 215#if defined(CONFIG_PM)
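      /*
       * Note: this only records which SIC wakeup bit (bfin_sic_iwr[]) and,
       * for a few sources, which VR_CTL wakeup enable (vr_wakeup) should be
       * set; the registers themselves are presumably programmed later by
       * the power-management/suspend code that reads these globals.
       */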
 216int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 217{
 218        u32 bank, bit, wakeup = 0;
 219        unsigned long flags;
 220        bank = BFIN_SYSIRQ(irq) / 32;
 221        bit = BFIN_SYSIRQ(irq) % 32;
 222
 223        switch (irq) {
 224#ifdef IRQ_RTC
 225        case IRQ_RTC:
 226        wakeup |= WAKE;
 227        break;
 228#endif
 229#ifdef IRQ_CAN0_RX
 230        case IRQ_CAN0_RX:
 231        wakeup |= CANWE;
 232        break;
 233#endif
 234#ifdef IRQ_CAN1_RX
 235        case IRQ_CAN1_RX:
 236        wakeup |= CANWE;
 237        break;
 238#endif
 239#ifdef IRQ_USB_INT0
 240        case IRQ_USB_INT0:
 241        wakeup |= USBWE;
 242        break;
 243#endif
 244#ifdef CONFIG_BF54x
 245        case IRQ_CNT:
 246        wakeup |= ROTWE;
 247        break;
 248#endif
 249        default:
 250        break;
 251        }
 252
 253        flags = hard_local_irq_save();
 254
 255        if (state) {
 256                bfin_sic_iwr[bank] |= (1 << bit);
 257                vr_wakeup  |= wakeup;
 258
 259        } else {
 260                bfin_sic_iwr[bank] &= ~(1 << bit);
 261                vr_wakeup  &= ~wakeup;
 262        }
 263
 264        hard_local_irq_restore(flags);
 265
 266        return 0;
 267}
 268
 269static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
 270{
 271        return bfin_internal_set_wake(d->irq, state);
 272}
 273#else
 274inline int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 275{
 276        return 0;
 277}
 278# define bfin_internal_set_wake_chip NULL
 279#endif
 280
 281#else /* SEC_GCTL */
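      /*
       * Parts that define SEC_GCTL (e.g. BF60x) use a System Event
       * Controller instead of the SIC: peripheral sources are configured
       * through per-source SEC_SCTL registers and delivered via a per-core
       * SCI, which is what the callbacks below program.
       */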
 282static void bfin_sec_preflow_handler(struct irq_data *d)
 283{
 284        unsigned long flags = hard_local_irq_save();
 285        unsigned int sid = BFIN_SYSIRQ(d->irq);
 286
 287        bfin_write_SEC_SCI(0, SEC_CSID, sid);
 288
 289        hard_local_irq_restore(flags);
 290}
 291
 292static void bfin_sec_mask_ack_irq(struct irq_data *d)
 293{
 294        unsigned long flags = hard_local_irq_save();
 295        unsigned int sid = BFIN_SYSIRQ(d->irq);
 296
 297        bfin_write_SEC_SCI(0, SEC_CSID, sid);
 298
 299        hard_local_irq_restore(flags);
 300}
 301
 302static void bfin_sec_unmask_irq(struct irq_data *d)
 303{
 304        unsigned long flags = hard_local_irq_save();
 305        unsigned int sid = BFIN_SYSIRQ(d->irq);
 306
 307        bfin_write32(SEC_END, sid);
 308
 309        hard_local_irq_restore(flags);
 310}
 311
 312static void bfin_sec_enable_ssi(unsigned int sid)
 313{
 314        unsigned long flags = hard_local_irq_save();
 315        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 316
 317        reg_sctl |= SEC_SCTL_SRC_EN;
 318        bfin_write_SEC_SCTL(sid, reg_sctl);
 319
 320        hard_local_irq_restore(flags);
 321}
 322
 323static void bfin_sec_disable_ssi(unsigned int sid)
 324{
 325        unsigned long flags = hard_local_irq_save();
 326        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 327
 328        reg_sctl &= ((uint32_t)~SEC_SCTL_SRC_EN);
 329        bfin_write_SEC_SCTL(sid, reg_sctl);
 330
 331        hard_local_irq_restore(flags);
 332}
 333
 334static void bfin_sec_set_ssi_coreid(unsigned int sid, unsigned int coreid)
 335{
 336        unsigned long flags = hard_local_irq_save();
 337        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 338
 339        reg_sctl &= ((uint32_t)~SEC_SCTL_CTG);
 340        bfin_write_SEC_SCTL(sid, reg_sctl | ((coreid << 20) & SEC_SCTL_CTG));
 341
 342        hard_local_irq_restore(flags);
 343}
 344
 345static void bfin_sec_enable_sci(unsigned int sid)
 346{
 347        unsigned long flags = hard_local_irq_save();
 348        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 349
 350        if (sid == BFIN_SYSIRQ(IRQ_WATCH0))
 351                reg_sctl |= SEC_SCTL_FAULT_EN;
 352        else
 353                reg_sctl |= SEC_SCTL_INT_EN;
 354        bfin_write_SEC_SCTL(sid, reg_sctl);
 355
 356        hard_local_irq_restore(flags);
 357}
 358
 359static void bfin_sec_disable_sci(unsigned int sid)
 360{
 361        unsigned long flags = hard_local_irq_save();
 362        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 363
 364        reg_sctl &= ((uint32_t)~SEC_SCTL_INT_EN);
 365        bfin_write_SEC_SCTL(sid, reg_sctl);
 366
 367        hard_local_irq_restore(flags);
 368}
 369
 370static void bfin_sec_enable(struct irq_data *d)
 371{
 372        unsigned long flags = hard_local_irq_save();
 373        unsigned int sid = BFIN_SYSIRQ(d->irq);
 374
 375        bfin_sec_enable_sci(sid);
 376        bfin_sec_enable_ssi(sid);
 377
 378        hard_local_irq_restore(flags);
 379}
 380
 381static void bfin_sec_disable(struct irq_data *d)
 382{
 383        unsigned long flags = hard_local_irq_save();
 384        unsigned int sid = BFIN_SYSIRQ(d->irq);
 385
 386        bfin_sec_disable_sci(sid);
 387        bfin_sec_disable_ssi(sid);
 388
 389        hard_local_irq_restore(flags);
 390}
 391
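      /*
       * Program the core interrupt levels (SEC_CPLVL) and the per-source
       * priority field of every SEC_SCTL from the supplied
       * sec_int_priority[] table.
       */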
 392static void bfin_sec_set_priority(unsigned int sec_int_levels, u8 *sec_int_priority)
 393{
 394        unsigned long flags = hard_local_irq_save();
 395        uint32_t reg_sctl;
 396        int i;
 397
 398        bfin_write_SEC_SCI(0, SEC_CPLVL, sec_int_levels);
 399
 400        for (i = 0; i < SYS_IRQS - BFIN_IRQ(0); i++) {
 401                reg_sctl = bfin_read_SEC_SCTL(i) & ~SEC_SCTL_PRIO;
 402                reg_sctl |= sec_int_priority[i] << SEC_SCTL_PRIO_OFFSET;
 403                bfin_write_SEC_SCTL(i, reg_sctl);
 404        }
 405
 406        hard_local_irq_restore(flags);
 407}
 408
 409void bfin_sec_raise_irq(unsigned int irq)
 410{
 411        unsigned long flags = hard_local_irq_save();
 412        unsigned int sid = BFIN_SYSIRQ(irq);
 413
 414        bfin_write32(SEC_RAISE, sid);
 415
 416        hard_local_irq_restore(flags);
 417}
 418
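      /*
       * Set up the software-driven SEC sources (SIDs 34-37), presumably
       * used for core-to-core signalling: 34/36 are targeted at core 0,
       * 35/37 at core 1, and only the core-1 sources are enabled here.
       */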
 419static void init_software_driven_irq(void)
 420{
 421        bfin_sec_set_ssi_coreid(34, 0);
 422        bfin_sec_set_ssi_coreid(35, 1);
 423
 424        bfin_sec_enable_sci(35);
 425        bfin_sec_enable_ssi(35);
 426        bfin_sec_set_ssi_coreid(36, 0);
 427        bfin_sec_set_ssi_coreid(37, 1);
 428        bfin_sec_enable_sci(37);
 429        bfin_sec_enable_ssi(37);
 430}
 431
 432void bfin_sec_resume(void)
 433{
 434        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
 435        udelay(100);
 436        bfin_write_SEC_GCTL(SEC_GCTL_EN);
 437        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
 438}
 439
 440void handle_sec_sfi_fault(uint32_t gstat)
 441{
 442
 443}
 444
 445void handle_sec_sci_fault(uint32_t gstat)
 446{
 447        uint32_t core_id;
 448        uint32_t cstat;
 449
 450        core_id = gstat & SEC_GSTAT_SCI;
 451        cstat = bfin_read_SEC_SCI(core_id, SEC_CSTAT);
 452        if (cstat & SEC_CSTAT_ERR) {
 453                switch (cstat & SEC_CSTAT_ERRC) {
 454                case SEC_CSTAT_ACKERR:
 455                        printk(KERN_DEBUG "sec ack err\n");
 456                        break;
 457                default:
 458                        printk(KERN_DEBUG "sec sci unknown err\n");
 459                }
 460        }
 461
 462}
 463
 464void handle_sec_ssi_fault(uint32_t gstat)
 465{
 466        uint32_t sid;
 467        uint32_t sstat;
 468
 469        sid = gstat & SEC_GSTAT_SID;
 470        sstat = bfin_read_SEC_SSTAT(sid);
 471
 472}
 473
 474void handle_sec_fault(uint32_t sec_gstat)
 475{
 476        if (sec_gstat & SEC_GSTAT_ERR) {
 477
 478                switch (sec_gstat & SEC_GSTAT_ERRC) {
 479                case 0:
 480                        handle_sec_sfi_fault(sec_gstat);
 481                        break;
 482                case SEC_GSTAT_SCIERR:
 483                        handle_sec_sci_fault(sec_gstat);
 484                        break;
 485                case SEC_GSTAT_SSIERR:
 486                        handle_sec_ssi_fault(sec_gstat);
 487                        break;
 488                }
 489
 490
 491        }
 492}
 493
 494static struct irqaction bfin_fault_irq = {
 495        .name = "Blackfin fault",
 496};
 497
 498static irqreturn_t bfin_fault_routine(int irq, void *data)
 499{
 500        struct pt_regs *fp = get_irq_regs();
 501
 502        switch (irq) {
 503        case IRQ_C0_DBL_FAULT:
 504                double_fault_c(fp);
 505                break;
 506        case IRQ_C0_HW_ERR:
 507                dump_bfin_process(fp);
 508                dump_bfin_mem(fp);
 509                show_regs(fp);
 510                printk(KERN_NOTICE "Kernel Stack\n");
 511                show_stack(current, NULL);
 512                print_modules();
 513                panic("Core 0 hardware error");
 514                break;
 515        case IRQ_C0_NMI_L1_PARITY_ERR:
 516                panic("Core 0 NMI L1 parity error");
 517                break;
 518        case IRQ_SEC_ERR:
 519                pr_err("SEC error\n");
 520                handle_sec_fault(bfin_read32(SEC_GSTAT));
 521                break;
 522        default:
 523                panic("Unknown fault %d", irq);
 524        }
 525
 526        return IRQ_HANDLED;
 527}
 528#endif /* SEC_GCTL */
 529
 530static struct irq_chip bfin_core_irqchip = {
 531        .name = "CORE",
 532        .irq_mask = bfin_core_mask_irq,
 533        .irq_unmask = bfin_core_unmask_irq,
 534};
 535
 536#ifndef SEC_GCTL
 537static struct irq_chip bfin_internal_irqchip = {
 538        .name = "INTN",
 539        .irq_mask = bfin_internal_mask_irq_chip,
 540        .irq_unmask = bfin_internal_unmask_irq_chip,
 541        .irq_disable = bfin_internal_mask_irq_chip,
 542        .irq_enable = bfin_internal_unmask_irq_chip,
 543#ifdef CONFIG_SMP
 544        .irq_set_affinity = bfin_internal_set_affinity,
 545#endif
 546        .irq_set_wake = bfin_internal_set_wake_chip,
 547};
 548#else
 549static struct irq_chip bfin_sec_irqchip = {
 550        .name = "SEC",
 551        .irq_mask_ack = bfin_sec_mask_ack_irq,
 552        .irq_mask = bfin_sec_mask_ack_irq,
 553        .irq_unmask = bfin_sec_unmask_irq,
 554        .irq_eoi = bfin_sec_unmask_irq,
 555        .irq_disable = bfin_sec_disable,
 556        .irq_enable = bfin_sec_enable,
 557};
 558#endif
 559
 560void bfin_handle_irq(unsigned irq)
 561{
 562#ifdef CONFIG_IPIPE
 563        struct pt_regs regs;    /* Contents not used. */
 564        ipipe_trace_irq_entry(irq);
 565        __ipipe_handle_irq(irq, &regs);
 566        ipipe_trace_irq_exit(irq);
 567#else /* !CONFIG_IPIPE */
 568        generic_handle_irq(irq);
 569#endif  /* !CONFIG_IPIPE */
 570}
 571
 572#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
 573static int mac_stat_int_mask;
 574
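      /*
       * Clear the EMAC status source behind a MAC status interrupt; the
       * various EMAC sticky/status bits appear to be write-1-to-clear, so
       * each case writes back the currently asserted, enabled bits.
       */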
 575static void bfin_mac_status_ack_irq(unsigned int irq)
 576{
 577        switch (irq) {
 578        case IRQ_MAC_MMCINT:
 579                bfin_write_EMAC_MMC_TIRQS(
 580                        bfin_read_EMAC_MMC_TIRQE() &
 581                        bfin_read_EMAC_MMC_TIRQS());
 582                bfin_write_EMAC_MMC_RIRQS(
 583                        bfin_read_EMAC_MMC_RIRQE() &
 584                        bfin_read_EMAC_MMC_RIRQS());
 585                break;
 586        case IRQ_MAC_RXFSINT:
 587                bfin_write_EMAC_RX_STKY(
 588                        bfin_read_EMAC_RX_IRQE() &
 589                        bfin_read_EMAC_RX_STKY());
 590                break;
 591        case IRQ_MAC_TXFSINT:
 592                bfin_write_EMAC_TX_STKY(
 593                        bfin_read_EMAC_TX_IRQE() &
 594                        bfin_read_EMAC_TX_STKY());
 595                break;
 596        case IRQ_MAC_WAKEDET:
 597                 bfin_write_EMAC_WKUP_CTL(
 598                        bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
 599                break;
 600        default:
 601                /* These bits are W1C */
 602                bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
 603                break;
 604        }
 605}
 606
 607static void bfin_mac_status_mask_irq(struct irq_data *d)
 608{
 609        unsigned int irq = d->irq;
 610
 611        mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
 612#ifdef BF537_FAMILY
 613        switch (irq) {
 614        case IRQ_MAC_PHYINT:
 615                bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
 616                break;
 617        default:
 618                break;
 619        }
 620#else
 621        if (!mac_stat_int_mask)
 622                bfin_internal_mask_irq(IRQ_MAC_ERROR);
 623#endif
 624        bfin_mac_status_ack_irq(irq);
 625}
 626
 627static void bfin_mac_status_unmask_irq(struct irq_data *d)
 628{
 629        unsigned int irq = d->irq;
 630
 631#ifdef BF537_FAMILY
 632        switch (irq) {
 633        case IRQ_MAC_PHYINT:
 634                bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
 635                break;
 636        default:
 637                break;
 638        }
 639#else
 640        if (!mac_stat_int_mask)
 641                bfin_internal_unmask_irq(IRQ_MAC_ERROR);
 642#endif
 643        mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
 644}
 645
 646#ifdef CONFIG_PM
 647int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
 648{
 649#ifdef BF537_FAMILY
 650        return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
 651#else
 652        return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
 653#endif
 654}
 655#else
 656# define bfin_mac_status_set_wake NULL
 657#endif
 658
 659static struct irq_chip bfin_mac_status_irqchip = {
 660        .name = "MACST",
 661        .irq_mask = bfin_mac_status_mask_irq,
 662        .irq_unmask = bfin_mac_status_unmask_irq,
 663        .irq_set_wake = bfin_mac_status_set_wake,
 664};
 665
 666void bfin_demux_mac_status_irq(unsigned int int_err_irq,
 667                               struct irq_desc *inta_desc)
 668{
 669        int i, irq = 0;
 670        u32 status = bfin_read_EMAC_SYSTAT();
 671
 672        for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
 673                if (status & (1L << i)) {
 674                        irq = IRQ_MAC_PHYINT + i;
 675                        break;
 676                }
 677
 678        if (irq) {
 679                if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
 680                        bfin_handle_irq(irq);
 681                } else {
 682                        bfin_mac_status_ack_irq(irq);
 683                        pr_debug("IRQ %d:"
 684                                        " MASKED MAC ERROR INTERRUPT ASSERTED\n",
 685                                        irq);
 686                }
 687        } else
 688                printk(KERN_ERR
 689                                "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
  690                                " INTERRUPT ASSERTED BUT NO SOURCE FOUND "
 691                                "(EMAC_SYSTAT=0x%X)\n",
 692                                __func__, __FILE__, __LINE__, status);
 693}
 694#endif
 695
 696static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 697{
 698#ifdef CONFIG_IPIPE
 699        handle = handle_level_irq;
 700#endif
 701        __irq_set_handler_locked(irq, handle);
 702}
 703
 704#ifdef CONFIG_GPIO_ADI
 705
 706static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
 707
 708static void bfin_gpio_ack_irq(struct irq_data *d)
 709{
  710        /* AFAIK, when a mask_ack handler is provided, ack_irq only
  711         * gets called for edge-sense irqs
 712         */
 713        set_gpio_data(irq_to_gpio(d->irq), 0);
 714}
 715
 716static void bfin_gpio_mask_ack_irq(struct irq_data *d)
 717{
 718        unsigned int irq = d->irq;
 719        u32 gpionr = irq_to_gpio(irq);
 720
 721        if (!irqd_is_level_type(d))
 722                set_gpio_data(gpionr, 0);
 723
 724        set_gpio_maska(gpionr, 0);
 725}
 726
 727static void bfin_gpio_mask_irq(struct irq_data *d)
 728{
 729        set_gpio_maska(irq_to_gpio(d->irq), 0);
 730}
 731
 732static void bfin_gpio_unmask_irq(struct irq_data *d)
 733{
 734        set_gpio_maska(irq_to_gpio(d->irq), 1);
 735}
 736
 737static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
 738{
 739        u32 gpionr = irq_to_gpio(d->irq);
 740
 741        if (__test_and_set_bit(gpionr, gpio_enabled))
 742                bfin_gpio_irq_prepare(gpionr);
 743
 744        bfin_gpio_unmask_irq(d);
 745
 746        return 0;
 747}
 748
 749static void bfin_gpio_irq_shutdown(struct irq_data *d)
 750{
 751        u32 gpionr = irq_to_gpio(d->irq);
 752
 753        bfin_gpio_mask_irq(d);
 754        __clear_bit(gpionr, gpio_enabled);
 755        bfin_gpio_irq_free(gpionr);
 756}
 757
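      /*
       * Translate the generic IRQ trigger type into the GPIO block's
       * polarity/edge/both/input-enable settings and install the matching
       * edge or level flow handler.
       */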
 758static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
 759{
 760        unsigned int irq = d->irq;
 761        int ret;
 762        char buf[16];
 763        u32 gpionr = irq_to_gpio(irq);
 764
 765        if (type == IRQ_TYPE_PROBE) {
 766                /* only probe unenabled GPIO interrupt lines */
 767                if (test_bit(gpionr, gpio_enabled))
 768                        return 0;
 769                type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
 770        }
 771
 772        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
 773                    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
 774
 775                snprintf(buf, 16, "gpio-irq%d", irq);
 776                ret = bfin_gpio_irq_request(gpionr, buf);
 777                if (ret)
 778                        return ret;
 779
 780                if (__test_and_set_bit(gpionr, gpio_enabled))
 781                        bfin_gpio_irq_prepare(gpionr);
 782
 783        } else {
 784                __clear_bit(gpionr, gpio_enabled);
 785                return 0;
 786        }
 787
 788        set_gpio_inen(gpionr, 0);
 789        set_gpio_dir(gpionr, 0);
 790
 791        if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
 792            == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
 793                set_gpio_both(gpionr, 1);
 794        else
 795                set_gpio_both(gpionr, 0);
 796
 797        if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
 798                set_gpio_polar(gpionr, 1);      /* low or falling edge denoted by one */
 799        else
 800                set_gpio_polar(gpionr, 0);      /* high or rising edge denoted by zero */
 801
 802        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
 803                set_gpio_edge(gpionr, 1);
 804                set_gpio_inen(gpionr, 1);
 805                set_gpio_data(gpionr, 0);
 806
 807        } else {
 808                set_gpio_edge(gpionr, 0);
 809                set_gpio_inen(gpionr, 1);
 810        }
 811
 812        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
 813                bfin_set_irq_handler(irq, handle_edge_irq);
 814        else
 815                bfin_set_irq_handler(irq, handle_level_irq);
 816
 817        return 0;
 818}
 819
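      /*
       * Dispatch every asserted, unmasked line of one GPIO port: the
       * port's DATA and MASKA registers are ANDed together and each set
       * bit is handled as its own virtual IRQ.
       */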
 820static void bfin_demux_gpio_block(unsigned int irq)
 821{
 822        unsigned int gpio, mask;
 823
 824        gpio = irq_to_gpio(irq);
 825        mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
 826
 827        while (mask) {
 828                if (mask & 1)
 829                        bfin_handle_irq(irq);
 830                irq++;
 831                mask >>= 1;
 832        }
 833}
 834
 835void bfin_demux_gpio_irq(unsigned int inta_irq,
 836                        struct irq_desc *desc)
 837{
 838        unsigned int irq;
 839
 840        switch (inta_irq) {
 841#if defined(BF537_FAMILY)
 842        case IRQ_PF_INTA_PG_INTA:
 843                bfin_demux_gpio_block(IRQ_PF0);
 844                irq = IRQ_PG0;
 845                break;
 846        case IRQ_PH_INTA_MAC_RX:
 847                irq = IRQ_PH0;
 848                break;
 849#elif defined(BF533_FAMILY)
 850        case IRQ_PROG_INTA:
 851                irq = IRQ_PF0;
 852                break;
 853#elif defined(BF538_FAMILY)
 854        case IRQ_PORTF_INTA:
 855                irq = IRQ_PF0;
 856                break;
 857#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
 858        case IRQ_PORTF_INTA:
 859                irq = IRQ_PF0;
 860                break;
 861        case IRQ_PORTG_INTA:
 862                irq = IRQ_PG0;
 863                break;
 864        case IRQ_PORTH_INTA:
 865                irq = IRQ_PH0;
 866                break;
 867#elif defined(CONFIG_BF561)
 868        case IRQ_PROG0_INTA:
 869                irq = IRQ_PF0;
 870                break;
 871        case IRQ_PROG1_INTA:
 872                irq = IRQ_PF16;
 873                break;
 874        case IRQ_PROG2_INTA:
 875                irq = IRQ_PF32;
 876                break;
 877#endif
 878        default:
 879                BUG();
 880                return;
 881        }
 882
 883        bfin_demux_gpio_block(irq);
 884}
 885
 886#ifdef CONFIG_PM
 887
 888static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
 889{
 890        return bfin_gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
 891}
 892
 893#else
 894
 895# define bfin_gpio_set_wake NULL
 896
 897#endif
 898
 899static struct irq_chip bfin_gpio_irqchip = {
 900        .name = "GPIO",
 901        .irq_ack = bfin_gpio_ack_irq,
 902        .irq_mask = bfin_gpio_mask_irq,
 903        .irq_mask_ack = bfin_gpio_mask_ack_irq,
 904        .irq_unmask = bfin_gpio_unmask_irq,
 905        .irq_disable = bfin_gpio_mask_irq,
 906        .irq_enable = bfin_gpio_unmask_irq,
 907        .irq_set_type = bfin_gpio_irq_type,
 908        .irq_startup = bfin_gpio_irq_startup,
 909        .irq_shutdown = bfin_gpio_irq_shutdown,
 910        .irq_set_wake = bfin_gpio_set_wake,
 911};
 912
 913#endif
 914
 915#ifdef CONFIG_PM
 916
 917#ifdef SEC_GCTL
 918static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS];
 919
 920static int sec_suspend(void)
 921{
 922        u32 bank;
 923
 924        for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
 925                save_pint_sec_ctl[bank] = bfin_read_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0));
 926        return 0;
 927}
 928
 929static void sec_resume(void)
 930{
 931        u32 bank;
 932
 933        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
 934        udelay(100);
 935        bfin_write_SEC_GCTL(SEC_GCTL_EN);
 936        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
 937
 938        for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
 939                bfin_write_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0), save_pint_sec_ctl[bank]);
 940}
 941
 942static struct syscore_ops sec_pm_syscore_ops = {
 943        .suspend = sec_suspend,
 944        .resume = sec_resume,
 945};
 946#endif
 947
 948#endif
 949
 950void init_exception_vectors(void)
 951{
 952        /* cannot program in software:
 953         * evt0 - emulation (jtag)
 954         * evt1 - reset
 955         */
 956        bfin_write_EVT2(evt_nmi);
 957        bfin_write_EVT3(trap);
 958        bfin_write_EVT5(evt_ivhw);
 959        bfin_write_EVT6(evt_timer);
 960        bfin_write_EVT7(evt_evt7);
 961        bfin_write_EVT8(evt_evt8);
 962        bfin_write_EVT9(evt_evt9);
 963        bfin_write_EVT10(evt_evt10);
 964        bfin_write_EVT11(evt_evt11);
 965        bfin_write_EVT12(evt_evt12);
 966        bfin_write_EVT13(evt_evt13);
 967        bfin_write_EVT14(evt_evt14);
 968        bfin_write_EVT15(evt_system_call);
 969        CSYNC();
 970}
 971
 972#ifndef SEC_GCTL
 973/*
 974 * This function should be called during kernel startup to initialize
 975 * the BFin IRQ handling routines.
 976 */
 977
 978int __init init_arch_irq(void)
 979{
 980        int irq;
 981        unsigned long ilat = 0;
 982
 983        /*  Disable all the peripheral intrs  - page 4-29 HW Ref manual */
 984#ifdef SIC_IMASK0
 985        bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
 986        bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
 987# ifdef SIC_IMASK2
 988        bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
 989# endif
 990# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
 991        bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
 992        bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
 993# endif
 994#else
 995        bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
 996#endif
 997
 998        local_irq_disable();
 999
1000        for (irq = 0; irq <= SYS_IRQS; irq++) {
1001                if (irq <= IRQ_CORETMR)
1002                        irq_set_chip(irq, &bfin_core_irqchip);
1003                else
1004                        irq_set_chip(irq, &bfin_internal_irqchip);
1005
1006                switch (irq) {
1007#if !BFIN_GPIO_PINT
1008#if defined(BF537_FAMILY)
1009                case IRQ_PH_INTA_MAC_RX:
1010                case IRQ_PF_INTA_PG_INTA:
1011#elif defined(BF533_FAMILY)
1012                case IRQ_PROG_INTA:
1013#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
1014                case IRQ_PORTF_INTA:
1015                case IRQ_PORTG_INTA:
1016                case IRQ_PORTH_INTA:
1017#elif defined(CONFIG_BF561)
1018                case IRQ_PROG0_INTA:
1019                case IRQ_PROG1_INTA:
1020                case IRQ_PROG2_INTA:
1021#elif defined(BF538_FAMILY)
1022                case IRQ_PORTF_INTA:
1023#endif
1024                        irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1025                        break;
1026#endif
1027#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1028                case IRQ_MAC_ERROR:
1029                        irq_set_chained_handler(irq,
1030                                                bfin_demux_mac_status_irq);
1031                        break;
1032#endif
1033#if defined(CONFIG_SMP) || defined(CONFIG_ICC)
1034                case IRQ_SUPPLE_0:
1035                case IRQ_SUPPLE_1:
1036                        irq_set_handler(irq, handle_percpu_irq);
1037                        break;
1038#endif
1039
1040#ifdef CONFIG_TICKSOURCE_CORETMR
1041                case IRQ_CORETMR:
1042# ifdef CONFIG_SMP
1043                        irq_set_handler(irq, handle_percpu_irq);
1044# else
1045                        irq_set_handler(irq, handle_simple_irq);
1046# endif
1047                        break;
1048#endif
1049
1050#ifdef CONFIG_TICKSOURCE_GPTMR0
1051                case IRQ_TIMER0:
1052                        irq_set_handler(irq, handle_simple_irq);
1053                        break;
1054#endif
1055
1056                default:
1057#ifdef CONFIG_IPIPE
1058                        irq_set_handler(irq, handle_level_irq);
1059#else
1060                        irq_set_handler(irq, handle_simple_irq);
1061#endif
1062                        break;
1063                }
1064        }
1065
1066        init_mach_irq();
1067
1068#if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
1069        for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
1070                irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
1071                                         handle_level_irq);
1072#endif
 1073        /* if configured as edge, the handler will be changed to handle_edge_irq */
1074#ifdef CONFIG_GPIO_ADI
1075        for (irq = GPIO_IRQ_BASE;
1076                irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1077                irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1078                                         handle_level_irq);
1079#endif
1080        bfin_write_IMASK(0);
1081        CSYNC();
1082        ilat = bfin_read_ILAT();
1083        CSYNC();
1084        bfin_write_ILAT(ilat);
1085        CSYNC();
1086
1087        printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
 1088        /* IMASK=xxx is equivalent to "STI xx" or to setting
 1089         * bfin_irq_flags=xx and calling local_irq_enable(), so it is
 1090         * better to set up the IARs before interrupts are enabled.
 1091         */
 1092        program_IAR();
1093        search_IAR();
1094
1095        /* Enable interrupts IVG7-15 */
1096        bfin_irq_flags |= IMASK_IVG15 |
1097                IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1098                IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1099
1100
1101        /* This implicitly covers ANOMALY_05000171
1102         * Boot-ROM code modifies SICA_IWRx wakeup registers
1103         */
1104#ifdef SIC_IWR0
1105        bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
1106# ifdef SIC_IWR1
1107        /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
1108         * will screw up the bootrom as it relies on MDMA0/1 waking it
1109         * up from IDLE instructions.  See this report for more info:
1110         * http://blackfin.uclinux.org/gf/tracker/4323
1111         */
1112        if (ANOMALY_05000435)
1113                bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
1114        else
1115                bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
1116# endif
1117# ifdef SIC_IWR2
1118        bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
1119# endif
1120#else
1121        bfin_write_SIC_IWR(IWR_DISABLE_ALL);
1122#endif
1123        return 0;
1124}
1125
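      /*
       * Map a core event vector (IVG7..IVG13) back to the peripheral IRQ
       * that raised it by scanning the precomputed ivg7_13 slice against
       * the latched SIC_ISR status; returns -1 if nothing is pending.
       */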
1126#ifdef CONFIG_DO_IRQ_L1
1127__attribute__((l1_text))
1128#endif
1129static int vec_to_irq(int vec)
1130{
1131        struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1132        struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1133        unsigned long sic_status[3];
1134        if (likely(vec == EVT_IVTMR_P))
1135                return IRQ_CORETMR;
1136#ifdef SIC_ISR
1137        sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1138#else
1139        if (smp_processor_id()) {
1140# ifdef SICB_ISR0
1141                /* This will be optimized out in UP mode. */
1142                sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1143                sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1144# endif
1145        } else {
1146                sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1147                sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1148        }
1149#endif
1150#ifdef SIC_ISR2
1151        sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1152#endif
1153
1154        for (;; ivg++) {
1155                if (ivg >= ivg_stop)
1156                        return -1;
1157#ifdef SIC_ISR
1158                if (sic_status[0] & ivg->isrflag)
1159#else
1160                if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1161#endif
1162                        return ivg->irqno;
1163        }
1164}
1165
1166#else /* SEC_GCTL */
1167
1168/*
1169 * This function should be called during kernel startup to initialize
1170 * the BFin IRQ handling routines.
1171 */
1172
1173int __init init_arch_irq(void)
1174{
1175        int irq;
1176        unsigned long ilat = 0;
1177
1178        bfin_write_SEC_GCTL(SEC_GCTL_RESET);
1179
1180        local_irq_disable();
1181
1182        for (irq = 0; irq <= SYS_IRQS; irq++) {
1183                if (irq <= IRQ_CORETMR) {
1184                        irq_set_chip_and_handler(irq, &bfin_core_irqchip,
1185                                handle_simple_irq);
1186#if defined(CONFIG_TICKSOURCE_CORETMR) && defined(CONFIG_SMP)
1187                        if (irq == IRQ_CORETMR)
1188                                irq_set_handler(irq, handle_percpu_irq);
1189#endif
1190                } else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
1191                        irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
1192                                handle_percpu_irq);
1193                } else {
1194                        irq_set_chip(irq, &bfin_sec_irqchip);
1195                        irq_set_handler(irq, handle_fasteoi_irq);
1196                        __irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
1197                }
1198        }
1199
1200        bfin_write_IMASK(0);
1201        CSYNC();
1202        ilat = bfin_read_ILAT();
1203        CSYNC();
1204        bfin_write_ILAT(ilat);
1205        CSYNC();
1206
1207        printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
1208
1209        bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
1210
1211        /* Enable interrupts IVG7-15 */
1212        bfin_irq_flags |= IMASK_IVG15 |
1213            IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1214            IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1215
1216
1217        bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
1218        bfin_sec_enable_sci(BFIN_SYSIRQ(IRQ_WATCH0));
1219        bfin_sec_enable_ssi(BFIN_SYSIRQ(IRQ_WATCH0));
1220        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
1221        udelay(100);
1222        bfin_write_SEC_GCTL(SEC_GCTL_EN);
1223        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1224        bfin_write_SEC_SCI(1, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1225
1226        init_software_driven_irq();
1227
1228#ifdef CONFIG_PM
1229        register_syscore_ops(&sec_pm_syscore_ops);
1230#endif
1231
1232        bfin_fault_irq.handler = bfin_fault_routine;
1233#ifdef CONFIG_L1_PARITY_CHECK
1234        setup_irq(IRQ_C0_NMI_L1_PARITY_ERR, &bfin_fault_irq);
1235#endif
1236        setup_irq(IRQ_C0_DBL_FAULT, &bfin_fault_irq);
1237        setup_irq(IRQ_SEC_ERR, &bfin_fault_irq);
1238
1239        return 0;
1240}
1241
1242#ifdef CONFIG_DO_IRQ_L1
1243__attribute__((l1_text))
1244#endif
1245static int vec_to_irq(int vec)
1246{
1247        if (likely(vec == EVT_IVTMR_P))
1248                return IRQ_CORETMR;
1249
1250        return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID));
1251}
1252#endif  /* SEC_GCTL */
1253
1254#ifdef CONFIG_DO_IRQ_L1
1255__attribute__((l1_text))
1256#endif
1257void do_irq(int vec, struct pt_regs *fp)
1258{
1259        int irq = vec_to_irq(vec);
1260        if (irq == -1)
1261                return;
1262        asm_do_IRQ(irq, fp);
1263}
1264
1265#ifdef CONFIG_IPIPE
1266
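      /*
       * Return the IVG level an IRQ is delivered on: core IRQs map to
       * their own number, SEC-based parts report IVG11 for all peripheral
       * IRQs, and SIC-based parts look the IRQ up in the ivg7_13 slices.
       */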
1267int __ipipe_get_irq_priority(unsigned irq)
1268{
1269        int ient, prio;
1270
1271        if (irq <= IRQ_CORETMR)
1272                return irq;
1273
1274#ifdef SEC_GCTL
1275        if (irq >= BFIN_IRQ(0))
1276                return IVG11;
1277#else
1278        for (ient = 0; ient < NR_PERI_INTS; ient++) {
1279                struct ivgx *ivg = ivg_table + ient;
1280                if (ivg->irqno == irq) {
1281                        for (prio = 0; prio <= IVG13-IVG7; prio++) {
1282                                if (ivg7_13[prio].ifirst <= ivg &&
1283                                    ivg7_13[prio].istop > ivg)
1284                                        return IVG7 + prio;
1285                        }
1286                }
1287        }
1288#endif
1289
1290        return IVG15;
1291}
1292
1293/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
1294#ifdef CONFIG_DO_IRQ_L1
1295__attribute__((l1_text))
1296#endif
1297asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1298{
1299        struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
1300        struct ipipe_domain *this_domain = __ipipe_current_domain;
1301        int irq, s = 0;
1302
1303        irq = vec_to_irq(vec);
1304        if (irq == -1)
1305                return 0;
1306
1307        if (irq == IRQ_SYSTMR) {
1308#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
1309                bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
1310#endif
1311                /* This is basically what we need from the register frame. */
1312                __this_cpu_write(__ipipe_tick_regs.ipend, regs->ipend);
1313                __this_cpu_write(__ipipe_tick_regs.pc, regs->pc);
1314                if (this_domain != ipipe_root_domain)
1315                        __this_cpu_and(__ipipe_tick_regs.ipend, ~0x10);
1316                else
1317                        __this_cpu_or(__ipipe_tick_regs.ipend, 0x10);
1318        }
1319
1320        /*
1321         * We don't want Linux interrupt handlers to run at the
1322         * current core priority level (i.e. < EVT15), since this
1323         * might delay other interrupts handled by a high priority
1324         * domain. Here is what we do instead:
1325         *
1326         * - we raise the SYNCDEFER bit to prevent
 1327         * __ipipe_handle_irq() from syncing the pipeline for the root
1328         * stage for the incoming interrupt. Upon return, that IRQ is
1329         * pending in the interrupt log.
1330         *
1331         * - we raise the TIF_IRQ_SYNC bit for the current thread, so
1332         * that _schedule_and_signal_from_int will eventually sync the
1333         * pipeline from EVT15.
1334         */
1335        if (this_domain == ipipe_root_domain) {
1336                s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1337                barrier();
1338        }
1339
1340        ipipe_trace_irq_entry(irq);
1341        __ipipe_handle_irq(irq, regs);
1342        ipipe_trace_irq_exit(irq);
1343
1344        if (user_mode(regs) &&
1345            !ipipe_test_foreign_stack() &&
1346            (current->ipipe_flags & PF_EVTRET) != 0) {
1347                /*
 1348                 * Testing for user_mode() does NOT fully eliminate
1349                 * foreign stack contexts, because of the forged
1350                 * interrupt returns we do through
1351                 * __ipipe_call_irqtail. In that case, we might have
1352                 * preempted a foreign stack context in a high
1353                 * priority domain, with a single interrupt level now
1354                 * pending after the irqtail unwinding is done. In
1355                 * which case user_mode() is now true, and the event
1356                 * gets dispatched spuriously.
1357                 */
1358                current->ipipe_flags &= ~PF_EVTRET;
1359                __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
1360        }
1361
1362        if (this_domain == ipipe_root_domain) {
1363                set_thread_flag(TIF_IRQ_SYNC);
1364                if (!s) {
1365                        __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1366                        return !test_bit(IPIPE_STALL_FLAG, &p->status);
1367                }
1368        }
1369
1370        return 0;
1371}
1372
1373#endif /* CONFIG_IPIPE */
1374