linux/arch/blackfin/mach-common/ints-priority.c
   1/*
   2 * Set up the interrupt priorities
   3 *
   4 * Copyright  2004-2009 Analog Devices Inc.
   5 *                 2003 Bas Vermeulen <bas@buyways.nl>
   6 *                 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
   7 *            2000-2001 Lineo, Inc. D. Jeff Dionne <jeff@lineo.ca>
   8 *                 1999 D. Jeff Dionne <jeff@uclinux.org>
   9 *                 1996 Roman Zippel
  10 *
  11 * Licensed under the GPL-2
  12 */
  13
  14#include <linux/module.h>
  15#include <linux/kernel_stat.h>
  16#include <linux/seq_file.h>
  17#include <linux/irq.h>
  18#include <linux/sched.h>
  19#include <linux/syscore_ops.h>
  20#include <asm/delay.h>
  21#ifdef CONFIG_IPIPE
  22#include <linux/ipipe.h>
  23#endif
  24#include <asm/traps.h>
  25#include <asm/blackfin.h>
  26#include <asm/gpio.h>
  27#include <asm/irq_handler.h>
  28#include <asm/dpmc.h>
  30
  31/*
  32 * NOTES:
  33 * - we have separated the physical Hardware interrupt from the
  34 * levels that the LINUX kernel sees (see the description in irq.h)
  36 */
  37
  38#ifndef CONFIG_SMP
  39/* Initialize this to an actual value to force it into the .data
  40 * section so that we know it is properly initialized at entry into
  41 * the kernel but before bss is initialized to zero (which is where
  42 * it would live otherwise).  The 0x1f magic represents the IRQs we
  43 * cannot actually mask out in hardware.
  44 */
  45unsigned long bfin_irq_flags = 0x1f;
  46EXPORT_SYMBOL(bfin_irq_flags);
  47#endif
  48
  49#ifdef CONFIG_PM
  50unsigned long bfin_sic_iwr[3];  /* Up to 3 SIC_IWRx registers */
  51unsigned vr_wakeup;
  52#endif
  53
  54#ifndef SEC_GCTL
  55static struct ivgx {
  56        /* irq number for request_irq, available in mach-bf5xx/irq.h */
  57        unsigned int irqno;
  58        /* corresponding bit in the SIC_ISR register */
  59        unsigned int isrflag;
  60} ivg_table[NR_PERI_INTS];
  61
  62static struct ivg_slice {
  63        /* position of first irq in ivg_table for given ivg */
  64        struct ivgx *ifirst;
  65        struct ivgx *istop;
  66} ivg7_13[IVG13 - IVG7 + 1];
  67
  68
  69/*
  70 * Search SIC_IAR and fill tables with the irqvalues
  71 * and their positions in the SIC_ISR register.
  72 */
  73static void __init search_IAR(void)
  74{
  75        unsigned ivg, irq_pos = 0;
  76        for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
  77                int irqN;
  78
  79                ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
  80
  81                for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
  82                        int irqn;
  83                        u32 iar =
  84                                bfin_read32((unsigned long *)SIC_IAR0 +
  85#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
  86        defined(CONFIG_BF538) || defined(CONFIG_BF539)
  87                                ((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
  88#else
  89                                (irqN >> 3)
  90#endif
  91                                );
  92                        for (irqn = irqN; irqn < irqN + 4; ++irqn) {
  93                                int iar_shift = (irqn & 7) * 4;
  94                                if (ivg == (0xf & (iar >> iar_shift))) {
  95                                        ivg_table[irq_pos].irqno = IVG7 + irqn;
  96                                        ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
  97                                        ivg7_13[ivg].istop++;
  98                                        irq_pos++;
  99                                }
 100                        }
 101                }
 102        }
 103}
 104#endif
 105
 106/*
 107 * This is for core internal IRQs
 108 */
 109void bfin_ack_noop(struct irq_data *d)
 110{
 111        /* Dummy function.  */
 112}
 113
 114static void bfin_core_mask_irq(struct irq_data *d)
 115{
 116        bfin_irq_flags &= ~(1 << d->irq);
 117        if (!hard_irqs_disabled())
 118                hard_local_irq_enable();
 119}
 120
 121static void bfin_core_unmask_irq(struct irq_data *d)
 122{
 123        bfin_irq_flags |= 1 << d->irq;
 124        /*
 125         * If interrupts are enabled, IMASK must contain the same value
 126         * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
 127         * are currently disabled we need not do anything; one of the
 128         * callers will take care of setting IMASK to the proper value
 129         * when reenabling interrupts.
 130         * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
 131         * what we need.
 132         */
 133        if (!hard_irqs_disabled())
 134                hard_local_irq_enable();
 135        return;
 136}
 137
 138#ifndef SEC_GCTL
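     /*
      * Mask a peripheral (system) interrupt in the SIC: clear its bit in
      * the relevant SIC_IMASK(x) register, and on SMP/ICC configurations
      * in the second core's SICB_IMASKx as well.
      */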
 139void bfin_internal_mask_irq(unsigned int irq)
 140{
 141        unsigned long flags = hard_local_irq_save();
 142#ifdef SIC_IMASK0
 143        unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
 144        unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
 145        bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
 146                        ~(1 << mask_bit));
 147# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
 148        bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
 149                        ~(1 << mask_bit));
 150# endif
 151#else
 152        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
 153                        ~(1 << BFIN_SYSIRQ(irq)));
 154#endif /* end of SIC_IMASK0 */
 155        hard_local_irq_restore(flags);
 156}
 157
 158static void bfin_internal_mask_irq_chip(struct irq_data *d)
 159{
 160        bfin_internal_mask_irq(d->irq);
 161}
 162
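     /*
      * Unmask a peripheral interrupt.  On SMP the caller supplies an
      * affinity mask, and the enable bit is set in SIC_IMASKx and/or
      * SICB_IMASKx depending on which cores are in that mask.
      */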
 163#ifdef CONFIG_SMP
 164void bfin_internal_unmask_irq_affinity(unsigned int irq,
 165                const struct cpumask *affinity)
 166#else
 167void bfin_internal_unmask_irq(unsigned int irq)
 168#endif
 169{
 170        unsigned long flags = hard_local_irq_save();
 171
 172#ifdef SIC_IMASK0
 173        unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
 174        unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
 175# ifdef CONFIG_SMP
 176        if (cpumask_test_cpu(0, affinity))
 177# endif
 178                bfin_write_SIC_IMASK(mask_bank,
 179                                bfin_read_SIC_IMASK(mask_bank) |
 180                                (1 << mask_bit));
 181# ifdef CONFIG_SMP
 182        if (cpumask_test_cpu(1, affinity))
 183                bfin_write_SICB_IMASK(mask_bank,
 184                                bfin_read_SICB_IMASK(mask_bank) |
 185                                (1 << mask_bit));
 186# endif
 187#else
 188        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
 189                        (1 << BFIN_SYSIRQ(irq)));
 190#endif
 191        hard_local_irq_restore(flags);
 192}
 193
 194#ifdef CONFIG_SMP
 195static void bfin_internal_unmask_irq_chip(struct irq_data *d)
 196{
 197        bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
 198}
 199
 200static int bfin_internal_set_affinity(struct irq_data *d,
 201                                      const struct cpumask *mask, bool force)
 202{
 203        bfin_internal_mask_irq(d->irq);
 204        bfin_internal_unmask_irq_affinity(d->irq, mask);
 205
 206        return 0;
 207}
 208#else
 209static void bfin_internal_unmask_irq_chip(struct irq_data *d)
 210{
 211        bfin_internal_unmask_irq(d->irq);
 212}
 213#endif
 214
 215#if defined(CONFIG_PM)
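     /*
      * Record the wakeup configuration for a system IRQ: set or clear its
      * bit in bfin_sic_iwr[] and accumulate the matching wakeup flag
      * (WAKE, CANWE, USBWE, ROTWE) in vr_wakeup, from which the suspend
      * code can program the actual wakeup sources.
      *
      * Typical path (sketch): a driver calls enable_irq_wake(irq), the
      * genirq core invokes the chip's .irq_set_wake hook, and that lands
      * here via bfin_internal_set_wake_chip().
      */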
 216int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 217{
 218        u32 bank, bit, wakeup = 0;
 219        unsigned long flags;
 220        bank = BFIN_SYSIRQ(irq) / 32;
 221        bit = BFIN_SYSIRQ(irq) % 32;
 222
  223        switch (irq) {
  224#ifdef IRQ_RTC
  225        case IRQ_RTC:
  226                wakeup |= WAKE;
  227                break;
  228#endif
  229#ifdef IRQ_CAN0_RX
  230        case IRQ_CAN0_RX:
  231                wakeup |= CANWE;
  232                break;
  233#endif
  234#ifdef IRQ_CAN1_RX
  235        case IRQ_CAN1_RX:
  236                wakeup |= CANWE;
  237                break;
  238#endif
  239#ifdef IRQ_USB_INT0
  240        case IRQ_USB_INT0:
  241                wakeup |= USBWE;
  242                break;
  243#endif
  244#ifdef CONFIG_BF54x
  245        case IRQ_CNT:
  246                wakeup |= ROTWE;
  247                break;
  248#endif
  249        default:
  250                break;
  251        }
 252
 253        flags = hard_local_irq_save();
 254
 255        if (state) {
 256                bfin_sic_iwr[bank] |= (1 << bit);
 257                vr_wakeup  |= wakeup;
 258
 259        } else {
 260                bfin_sic_iwr[bank] &= ~(1 << bit);
 261                vr_wakeup  &= ~wakeup;
 262        }
 263
 264        hard_local_irq_restore(flags);
 265
 266        return 0;
 267}
 268
 269static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
 270{
 271        return bfin_internal_set_wake(d->irq, state);
 272}
 273#else
 274inline int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 275{
 276        return 0;
 277}
 278# define bfin_internal_set_wake_chip NULL
 279#endif
 280
 281#else /* SEC_GCTL */
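     /*
      * Parts with a System Event Controller (SEC) instead of the SIC
      * (BF60x).  The helpers below program the per-source SEC_SCTL
      * registers (the SSI side) and the per-core SCI interface
      * (SEC_CSID/SEC_CSTAT/SEC_CPLVL), and use SEC_END/SEC_RAISE to end
      * or raise an interrupt by its source ID (sid).
      */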
 282static void bfin_sec_preflow_handler(struct irq_data *d)
 283{
 284        unsigned long flags = hard_local_irq_save();
 285        unsigned int sid = BFIN_SYSIRQ(d->irq);
 286
 287        bfin_write_SEC_SCI(0, SEC_CSID, sid);
 288
 289        hard_local_irq_restore(flags);
 290}
 291
 292static void bfin_sec_mask_ack_irq(struct irq_data *d)
 293{
 294        unsigned long flags = hard_local_irq_save();
 295        unsigned int sid = BFIN_SYSIRQ(d->irq);
 296
 297        bfin_write_SEC_SCI(0, SEC_CSID, sid);
 298
 299        hard_local_irq_restore(flags);
 300}
 301
 302static void bfin_sec_unmask_irq(struct irq_data *d)
 303{
 304        unsigned long flags = hard_local_irq_save();
 305        unsigned int sid = BFIN_SYSIRQ(d->irq);
 306
 307        bfin_write32(SEC_END, sid);
 308
 309        hard_local_irq_restore(flags);
 310}
 311
 312static void bfin_sec_enable_ssi(unsigned int sid)
 313{
 314        unsigned long flags = hard_local_irq_save();
 315        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 316
 317        reg_sctl |= SEC_SCTL_SRC_EN;
 318        bfin_write_SEC_SCTL(sid, reg_sctl);
 319
 320        hard_local_irq_restore(flags);
 321}
 322
 323static void bfin_sec_disable_ssi(unsigned int sid)
 324{
 325        unsigned long flags = hard_local_irq_save();
 326        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 327
 328        reg_sctl &= ((uint32_t)~SEC_SCTL_SRC_EN);
 329        bfin_write_SEC_SCTL(sid, reg_sctl);
 330
 331        hard_local_irq_restore(flags);
 332}
 333
 334static void bfin_sec_set_ssi_coreid(unsigned int sid, unsigned int coreid)
 335{
 336        unsigned long flags = hard_local_irq_save();
 337        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 338
 339        reg_sctl &= ((uint32_t)~SEC_SCTL_CTG);
 340        bfin_write_SEC_SCTL(sid, reg_sctl | ((coreid << 20) & SEC_SCTL_CTG));
 341
 342        hard_local_irq_restore(flags);
 343}
 344
 345static void bfin_sec_enable_sci(unsigned int sid)
 346{
 347        unsigned long flags = hard_local_irq_save();
 348        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 349
 350        if (sid == BFIN_SYSIRQ(IRQ_WATCH0))
 351                reg_sctl |= SEC_SCTL_FAULT_EN;
 352        else
 353                reg_sctl |= SEC_SCTL_INT_EN;
 354        bfin_write_SEC_SCTL(sid, reg_sctl);
 355
 356        hard_local_irq_restore(flags);
 357}
 358
 359static void bfin_sec_disable_sci(unsigned int sid)
 360{
 361        unsigned long flags = hard_local_irq_save();
 362        uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 363
 364        reg_sctl &= ((uint32_t)~SEC_SCTL_INT_EN);
 365        bfin_write_SEC_SCTL(sid, reg_sctl);
 366
 367        hard_local_irq_restore(flags);
 368}
 369
 370static void bfin_sec_enable(struct irq_data *d)
 371{
 372        unsigned long flags = hard_local_irq_save();
 373        unsigned int sid = BFIN_SYSIRQ(d->irq);
 374
 375        bfin_sec_enable_sci(sid);
 376        bfin_sec_enable_ssi(sid);
 377
 378        hard_local_irq_restore(flags);
 379}
 380
 381static void bfin_sec_disable(struct irq_data *d)
 382{
 383        unsigned long flags = hard_local_irq_save();
 384        unsigned int sid = BFIN_SYSIRQ(d->irq);
 385
 386        bfin_sec_disable_sci(sid);
 387        bfin_sec_disable_ssi(sid);
 388
 389        hard_local_irq_restore(flags);
 390}
 391
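     /*
      * Program the core's interrupt level boundaries (SEC_CPLVL) and each
      * source's priority field in SEC_SCTL from the supplied priority
      * table.
      */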
 392static void bfin_sec_set_priority(unsigned int sec_int_levels, u8 *sec_int_priority)
 393{
 394        unsigned long flags = hard_local_irq_save();
 395        uint32_t reg_sctl;
 396        int i;
 397
 398        bfin_write_SEC_SCI(0, SEC_CPLVL, sec_int_levels);
 399
 400        for (i = 0; i < SYS_IRQS - BFIN_IRQ(0); i++) {
 401                reg_sctl = bfin_read_SEC_SCTL(i) & ~SEC_SCTL_PRIO;
 402                reg_sctl |= sec_int_priority[i] << SEC_SCTL_PRIO_OFFSET;
 403                bfin_write_SEC_SCTL(i, reg_sctl);
 404        }
 405
 406        hard_local_irq_restore(flags);
 407}
 408
 409void bfin_sec_raise_irq(unsigned int irq)
 410{
 411        unsigned long flags = hard_local_irq_save();
 412        unsigned int sid = BFIN_SYSIRQ(irq);
 413
 414        bfin_write32(SEC_RAISE, sid);
 415
 416        hard_local_irq_restore(flags);
 417}
 418
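     /*
      * Route the software-driven SEC sources: sids 34/36 target core 0
      * and sids 35/37 target core 1, with the core 1 pair enabled here.
      * These presumably back the supplemental (inter-core) interrupts.
      */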
 419static void init_software_driven_irq(void)
 420{
 421        bfin_sec_set_ssi_coreid(34, 0);
 422        bfin_sec_set_ssi_coreid(35, 1);
 423
 424        bfin_sec_enable_sci(35);
 425        bfin_sec_enable_ssi(35);
 426        bfin_sec_set_ssi_coreid(36, 0);
 427        bfin_sec_set_ssi_coreid(37, 1);
 428        bfin_sec_enable_sci(37);
 429        bfin_sec_enable_ssi(37);
 430}
 431
 432void bfin_sec_resume(void)
 433{
 434        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
 435        udelay(100);
 436        bfin_write_SEC_GCTL(SEC_GCTL_EN);
 437        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
 438}
 439
 440void handle_sec_sfi_fault(uint32_t gstat)
 441{
 442
 443}
 444
 445void handle_sec_sci_fault(uint32_t gstat)
 446{
 447        uint32_t core_id;
 448        uint32_t cstat;
 449
 450        core_id = gstat & SEC_GSTAT_SCI;
 451        cstat = bfin_read_SEC_SCI(core_id, SEC_CSTAT);
 452        if (cstat & SEC_CSTAT_ERR) {
 453                switch (cstat & SEC_CSTAT_ERRC) {
 454                case SEC_CSTAT_ACKERR:
 455                        printk(KERN_DEBUG "sec ack err\n");
 456                        break;
 457                default:
  458                        printk(KERN_DEBUG "sec sci unknown err\n");
 459                }
 460        }
 461
 462}
 463
 464void handle_sec_ssi_fault(uint32_t gstat)
 465{
 466        uint32_t sid;
 467        uint32_t sstat;
 468
 469        sid = gstat & SEC_GSTAT_SID;
 470        sstat = bfin_read_SEC_SSTAT(sid);
 471
 472}
 473
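     /*
      * Chained handler for the SEC error interrupt: decode SEC_GSTAT and
      * dispatch to the SFI/SCI/SSI fault helpers above, then run the
      * normal fasteoi flow for the IRQ.
      */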
 474void handle_sec_fault(unsigned int irq, struct irq_desc *desc)
 475{
 476        uint32_t sec_gstat;
 477
 478        raw_spin_lock(&desc->lock);
 479
 480        sec_gstat = bfin_read32(SEC_GSTAT);
 481        if (sec_gstat & SEC_GSTAT_ERR) {
 482
 483                switch (sec_gstat & SEC_GSTAT_ERRC) {
 484                case 0:
 485                        handle_sec_sfi_fault(sec_gstat);
 486                        break;
 487                case SEC_GSTAT_SCIERR:
 488                        handle_sec_sci_fault(sec_gstat);
 489                        break;
 490                case SEC_GSTAT_SSIERR:
 491                        handle_sec_ssi_fault(sec_gstat);
 492                        break;
 493                }
 494
 495
 496        }
 497
 498        raw_spin_unlock(&desc->lock);
 499
 500        handle_fasteoi_irq(irq, desc);
 501}
 502
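     /*
      * Handler for the core fault interrupts routed through the SEC:
      * double faults are handed to double_fault_c(), while hardware
      * errors and the L1 parity NMI dump state and panic.
      */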
 503void handle_core_fault(unsigned int irq, struct irq_desc *desc)
 504{
 505        struct pt_regs *fp = get_irq_regs();
 506
 507        raw_spin_lock(&desc->lock);
 508
 509        switch (irq) {
 510        case IRQ_C0_DBL_FAULT:
 511                double_fault_c(fp);
 512                break;
 513        case IRQ_C0_HW_ERR:
 514                dump_bfin_process(fp);
 515                dump_bfin_mem(fp);
 516                show_regs(fp);
 517                printk(KERN_NOTICE "Kernel Stack\n");
 518                show_stack(current, NULL);
 519                print_modules();
 520                panic("Core 0 hardware error");
 521                break;
 522        case IRQ_C0_NMI_L1_PARITY_ERR:
 523                panic("Core 0 NMI L1 parity error");
 524                break;
 525        default:
 526                panic("Core 1 fault %d occurs unexpectedly", irq);
 527        }
 528
 529        raw_spin_unlock(&desc->lock);
 530}
 531#endif /* SEC_GCTL */
 532
 533static struct irq_chip bfin_core_irqchip = {
 534        .name = "CORE",
 535        .irq_mask = bfin_core_mask_irq,
 536        .irq_unmask = bfin_core_unmask_irq,
 537};
 538
 539#ifndef SEC_GCTL
 540static struct irq_chip bfin_internal_irqchip = {
 541        .name = "INTN",
 542        .irq_mask = bfin_internal_mask_irq_chip,
 543        .irq_unmask = bfin_internal_unmask_irq_chip,
 544        .irq_disable = bfin_internal_mask_irq_chip,
 545        .irq_enable = bfin_internal_unmask_irq_chip,
 546#ifdef CONFIG_SMP
 547        .irq_set_affinity = bfin_internal_set_affinity,
 548#endif
 549        .irq_set_wake = bfin_internal_set_wake_chip,
 550};
 551#else
 552static struct irq_chip bfin_sec_irqchip = {
 553        .name = "SEC",
 554        .irq_mask_ack = bfin_sec_mask_ack_irq,
 555        .irq_mask = bfin_sec_mask_ack_irq,
 556        .irq_unmask = bfin_sec_unmask_irq,
 557        .irq_eoi = bfin_sec_unmask_irq,
 558        .irq_disable = bfin_sec_disable,
 559        .irq_enable = bfin_sec_enable,
 560};
 561#endif
 562
 563void bfin_handle_irq(unsigned irq)
 564{
 565#ifdef CONFIG_IPIPE
 566        struct pt_regs regs;    /* Contents not used. */
 567        ipipe_trace_irq_entry(irq);
 568        __ipipe_handle_irq(irq, &regs);
 569        ipipe_trace_irq_exit(irq);
 570#else /* !CONFIG_IPIPE */
 571        generic_handle_irq(irq);
 572#endif  /* !CONFIG_IPIPE */
 573}
 574
 575#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
 576static int mac_stat_int_mask;
 577
 578static void bfin_mac_status_ack_irq(unsigned int irq)
 579{
 580        switch (irq) {
 581        case IRQ_MAC_MMCINT:
 582                bfin_write_EMAC_MMC_TIRQS(
 583                        bfin_read_EMAC_MMC_TIRQE() &
 584                        bfin_read_EMAC_MMC_TIRQS());
 585                bfin_write_EMAC_MMC_RIRQS(
 586                        bfin_read_EMAC_MMC_RIRQE() &
 587                        bfin_read_EMAC_MMC_RIRQS());
 588                break;
 589        case IRQ_MAC_RXFSINT:
 590                bfin_write_EMAC_RX_STKY(
 591                        bfin_read_EMAC_RX_IRQE() &
 592                        bfin_read_EMAC_RX_STKY());
 593                break;
 594        case IRQ_MAC_TXFSINT:
 595                bfin_write_EMAC_TX_STKY(
 596                        bfin_read_EMAC_TX_IRQE() &
 597                        bfin_read_EMAC_TX_STKY());
 598                break;
 599        case IRQ_MAC_WAKEDET:
  600                bfin_write_EMAC_WKUP_CTL(
 601                        bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
 602                break;
 603        default:
 604                /* These bits are W1C */
 605                bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
 606                break;
 607        }
 608}
 609
 610static void bfin_mac_status_mask_irq(struct irq_data *d)
 611{
 612        unsigned int irq = d->irq;
 613
 614        mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
 615#ifdef BF537_FAMILY
 616        switch (irq) {
 617        case IRQ_MAC_PHYINT:
 618                bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
 619                break;
 620        default:
 621                break;
 622        }
 623#else
 624        if (!mac_stat_int_mask)
 625                bfin_internal_mask_irq(IRQ_MAC_ERROR);
 626#endif
 627        bfin_mac_status_ack_irq(irq);
 628}
 629
 630static void bfin_mac_status_unmask_irq(struct irq_data *d)
 631{
 632        unsigned int irq = d->irq;
 633
 634#ifdef BF537_FAMILY
 635        switch (irq) {
 636        case IRQ_MAC_PHYINT:
 637                bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
 638                break;
 639        default:
 640                break;
 641        }
 642#else
 643        if (!mac_stat_int_mask)
 644                bfin_internal_unmask_irq(IRQ_MAC_ERROR);
 645#endif
 646        mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
 647}
 648
 649#ifdef CONFIG_PM
 650int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
 651{
 652#ifdef BF537_FAMILY
 653        return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
 654#else
 655        return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
 656#endif
 657}
 658#else
 659# define bfin_mac_status_set_wake NULL
 660#endif
 661
 662static struct irq_chip bfin_mac_status_irqchip = {
 663        .name = "MACST",
 664        .irq_mask = bfin_mac_status_mask_irq,
 665        .irq_unmask = bfin_mac_status_unmask_irq,
 666        .irq_set_wake = bfin_mac_status_set_wake,
 667};
 668
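     /*
      * Demultiplex the shared MAC status/error interrupt: find the first
      * pending source in EMAC_SYSTAT and either handle it (if unmasked
      * here) or just acknowledge it.
      */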
 669void bfin_demux_mac_status_irq(unsigned int int_err_irq,
 670                               struct irq_desc *inta_desc)
 671{
 672        int i, irq = 0;
 673        u32 status = bfin_read_EMAC_SYSTAT();
 674
 675        for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
 676                if (status & (1L << i)) {
 677                        irq = IRQ_MAC_PHYINT + i;
 678                        break;
 679                }
 680
 681        if (irq) {
 682                if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
 683                        bfin_handle_irq(irq);
 684                } else {
 685                        bfin_mac_status_ack_irq(irq);
 686                        pr_debug("IRQ %d:"
 687                                        " MASKED MAC ERROR INTERRUPT ASSERTED\n",
 688                                        irq);
 689                }
 690        } else
 691                printk(KERN_ERR
 692                                "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
 693                                " INTERRUPT ASSERTED BUT NO SOURCE FOUND"
  694                                " (EMAC_SYSTAT=0x%X)\n",
 695                                __func__, __FILE__, __LINE__, status);
 696}
 697#endif
 698
 699static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 700{
 701#ifdef CONFIG_IPIPE
 702        handle = handle_level_irq;
 703#endif
 704        __irq_set_handler_locked(irq, handle);
 705}
 706
 707static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
 708extern void bfin_gpio_irq_prepare(unsigned gpio);
 709
 710#if !BFIN_GPIO_PINT
 711
 712static void bfin_gpio_ack_irq(struct irq_data *d)
 713{
  714        /* AFAIK, when mask_ack is provided, ack_irq gets
  715         * called only for edge-sensitive irqs
  716         */
 717        set_gpio_data(irq_to_gpio(d->irq), 0);
 718}
 719
 720static void bfin_gpio_mask_ack_irq(struct irq_data *d)
 721{
 722        unsigned int irq = d->irq;
 723        u32 gpionr = irq_to_gpio(irq);
 724
 725        if (!irqd_is_level_type(d))
 726                set_gpio_data(gpionr, 0);
 727
 728        set_gpio_maska(gpionr, 0);
 729}
 730
 731static void bfin_gpio_mask_irq(struct irq_data *d)
 732{
 733        set_gpio_maska(irq_to_gpio(d->irq), 0);
 734}
 735
 736static void bfin_gpio_unmask_irq(struct irq_data *d)
 737{
 738        set_gpio_maska(irq_to_gpio(d->irq), 1);
 739}
 740
 741static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
 742{
 743        u32 gpionr = irq_to_gpio(d->irq);
 744
 745        if (__test_and_set_bit(gpionr, gpio_enabled))
 746                bfin_gpio_irq_prepare(gpionr);
 747
 748        bfin_gpio_unmask_irq(d);
 749
 750        return 0;
 751}
 752
 753static void bfin_gpio_irq_shutdown(struct irq_data *d)
 754{
 755        u32 gpionr = irq_to_gpio(d->irq);
 756
 757        bfin_gpio_mask_irq(d);
 758        __clear_bit(gpionr, gpio_enabled);
 759        bfin_gpio_irq_free(gpionr);
 760}
 761
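     /*
      * Configure a GPIO IRQ's trigger: claim the pin, program the
      * INEN/DIR/EDGE/POLAR/BOTH registers for the requested edge or level
      * sense, and pick the matching flow handler.
      */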
 762static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
 763{
 764        unsigned int irq = d->irq;
 765        int ret;
 766        char buf[16];
 767        u32 gpionr = irq_to_gpio(irq);
 768
 769        if (type == IRQ_TYPE_PROBE) {
 770                /* only probe unenabled GPIO interrupt lines */
 771                if (test_bit(gpionr, gpio_enabled))
 772                        return 0;
 773                type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
 774        }
 775
 776        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
 777                    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
 778
 779                snprintf(buf, 16, "gpio-irq%d", irq);
 780                ret = bfin_gpio_irq_request(gpionr, buf);
 781                if (ret)
 782                        return ret;
 783
 784                if (__test_and_set_bit(gpionr, gpio_enabled))
 785                        bfin_gpio_irq_prepare(gpionr);
 786
 787        } else {
 788                __clear_bit(gpionr, gpio_enabled);
 789                return 0;
 790        }
 791
 792        set_gpio_inen(gpionr, 0);
 793        set_gpio_dir(gpionr, 0);
 794
 795        if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
 796            == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
 797                set_gpio_both(gpionr, 1);
 798        else
 799                set_gpio_both(gpionr, 0);
 800
 801        if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
 802                set_gpio_polar(gpionr, 1);      /* low or falling edge denoted by one */
 803        else
 804                set_gpio_polar(gpionr, 0);      /* high or rising edge denoted by zero */
 805
 806        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
 807                set_gpio_edge(gpionr, 1);
 808                set_gpio_inen(gpionr, 1);
 809                set_gpio_data(gpionr, 0);
 810
 811        } else {
 812                set_gpio_edge(gpionr, 0);
 813                set_gpio_inen(gpionr, 1);
 814        }
 815
 816        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
 817                bfin_set_irq_handler(irq, handle_edge_irq);
 818        else
 819                bfin_set_irq_handler(irq, handle_level_irq);
 820
 821        return 0;
 822}
 823
 824#ifdef CONFIG_PM
 825static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
 826{
 827        return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
 828}
 829#else
 830# define bfin_gpio_set_wake NULL
 831#endif
 832
 833static void bfin_demux_gpio_block(unsigned int irq)
 834{
 835        unsigned int gpio, mask;
 836
 837        gpio = irq_to_gpio(irq);
 838        mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
 839
 840        while (mask) {
 841                if (mask & 1)
 842                        bfin_handle_irq(irq);
 843                irq++;
 844                mask >>= 1;
 845        }
 846}
 847
 848void bfin_demux_gpio_irq(unsigned int inta_irq,
 849                        struct irq_desc *desc)
 850{
 851        unsigned int irq;
 852
 853        switch (inta_irq) {
 854#if defined(BF537_FAMILY)
 855        case IRQ_PF_INTA_PG_INTA:
 856                bfin_demux_gpio_block(IRQ_PF0);
 857                irq = IRQ_PG0;
 858                break;
 859        case IRQ_PH_INTA_MAC_RX:
 860                irq = IRQ_PH0;
 861                break;
 862#elif defined(BF533_FAMILY)
 863        case IRQ_PROG_INTA:
 864                irq = IRQ_PF0;
 865                break;
 866#elif defined(BF538_FAMILY)
 867        case IRQ_PORTF_INTA:
 868                irq = IRQ_PF0;
 869                break;
 870#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
 871        case IRQ_PORTF_INTA:
 872                irq = IRQ_PF0;
 873                break;
 874        case IRQ_PORTG_INTA:
 875                irq = IRQ_PG0;
 876                break;
 877        case IRQ_PORTH_INTA:
 878                irq = IRQ_PH0;
 879                break;
 880#elif defined(CONFIG_BF561)
 881        case IRQ_PROG0_INTA:
 882                irq = IRQ_PF0;
 883                break;
 884        case IRQ_PROG1_INTA:
 885                irq = IRQ_PF16;
 886                break;
 887        case IRQ_PROG2_INTA:
 888                irq = IRQ_PF32;
 889                break;
 890#endif
 891        default:
 892                BUG();
 893                return;
 894        }
 895
 896        bfin_demux_gpio_block(irq);
 897}
 898
 899#else
 900
 901#define NR_PINT_BITS            32
 902#define IRQ_NOT_AVAIL           0xFF
 903
 904#define PINT_2_BANK(x)          ((x) >> 5)
 905#define PINT_2_BIT(x)           ((x) & 0x1F)
 906#define PINT_BIT(x)             (1 << (PINT_2_BIT(x)))
 907
 908static unsigned char irq2pint_lut[NR_PINTS];
 909static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
 910
 911static struct bfin_pint_regs * const pint[NR_PINT_SYS_IRQS] = {
 912        (struct bfin_pint_regs *)PINT0_MASK_SET,
 913        (struct bfin_pint_regs *)PINT1_MASK_SET,
 914        (struct bfin_pint_regs *)PINT2_MASK_SET,
 915        (struct bfin_pint_regs *)PINT3_MASK_SET,
 916#ifdef CONFIG_BF60x
 917        (struct bfin_pint_regs *)PINT4_MASK_SET,
 918        (struct bfin_pint_regs *)PINT5_MASK_SET,
 919#endif
 920};
 921
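     /*
      * Translate a PINT bank plus PINT_ASSIGN byte value into the base
      * GPIO IRQ number of the port that byte is routed to.
      */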
 922inline unsigned int get_irq_base(u32 bank, u8 bmap)
 923{
 924        unsigned int irq_base;
 925
 926#ifndef CONFIG_BF60x
 927        if (bank < 2) {         /*PA-PB */
 928                irq_base = IRQ_PA0 + bmap * 16;
 929        } else {                /*PC-PJ */
 930                irq_base = IRQ_PC0 + bmap * 16;
 931        }
 932#else
 933        irq_base = IRQ_PA0 + bank * 16 + bmap * 16;
 934#endif
 935        return irq_base;
 936}
 937
 938        /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
 939void init_pint_lut(void)
 940{
 941        u16 bank, bit, irq_base, bit_pos;
 942        u32 pint_assign;
 943        u8 bmap;
 944
 945        memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));
 946
 947        for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
 948
 949                pint_assign = pint[bank]->assign;
 950
 951                for (bit = 0; bit < NR_PINT_BITS; bit++) {
 952
 953                        bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;
 954
 955                        irq_base = get_irq_base(bank, bmap);
 956
 957                        irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
 958                        bit_pos = bit + bank * NR_PINT_BITS;
 959
 960                        pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
 961                        irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
 962                }
 963        }
 964}
 965
 966static void bfin_gpio_ack_irq(struct irq_data *d)
 967{
 968        u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 969        u32 pintbit = PINT_BIT(pint_val);
 970        u32 bank = PINT_2_BANK(pint_val);
 971
 972        if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
 973                if (pint[bank]->invert_set & pintbit)
 974                        pint[bank]->invert_clear = pintbit;
 975                else
 976                        pint[bank]->invert_set = pintbit;
 977        }
 978        pint[bank]->request = pintbit;
 979
 980}
 981
 982static void bfin_gpio_mask_ack_irq(struct irq_data *d)
 983{
 984        u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 985        u32 pintbit = PINT_BIT(pint_val);
 986        u32 bank = PINT_2_BANK(pint_val);
 987
 988        if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
 989                if (pint[bank]->invert_set & pintbit)
 990                        pint[bank]->invert_clear = pintbit;
 991                else
 992                        pint[bank]->invert_set = pintbit;
 993        }
 994
 995        pint[bank]->request = pintbit;
 996        pint[bank]->mask_clear = pintbit;
 997}
 998
 999static void bfin_gpio_mask_irq(struct irq_data *d)
1000{
1001        u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
1002
1003        pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
1004}
1005
1006static void bfin_gpio_unmask_irq(struct irq_data *d)
1007{
1008        u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
1009        u32 pintbit = PINT_BIT(pint_val);
1010        u32 bank = PINT_2_BANK(pint_val);
1011
1012        pint[bank]->mask_set = pintbit;
1013}
1014
1015static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
1016{
1017        unsigned int irq = d->irq;
1018        u32 gpionr = irq_to_gpio(irq);
1019        u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
1020
1021        if (pint_val == IRQ_NOT_AVAIL) {
1022                printk(KERN_ERR
 1023                "GPIO IRQ %d: not in PINT assign table; "
 1024                "reconfigure the interrupt-to-port assignment\n", irq);
1025                return -ENODEV;
1026        }
1027
1028        if (__test_and_set_bit(gpionr, gpio_enabled))
1029                bfin_gpio_irq_prepare(gpionr);
1030
1031        bfin_gpio_unmask_irq(d);
1032
1033        return 0;
1034}
1035
1036static void bfin_gpio_irq_shutdown(struct irq_data *d)
1037{
1038        u32 gpionr = irq_to_gpio(d->irq);
1039
1040        bfin_gpio_mask_irq(d);
1041        __clear_bit(gpionr, gpio_enabled);
1042        bfin_gpio_irq_free(gpionr);
1043}
1044
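     /*
      * Configure a PINT-routed GPIO IRQ's trigger: claim the pin, set the
      * invert and edge bits for the requested polarity/sense (picking the
      * invert state dynamically for both-edge mode), and select the
      * matching flow handler.
      */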
1045static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
1046{
1047        unsigned int irq = d->irq;
1048        int ret;
1049        char buf[16];
1050        u32 gpionr = irq_to_gpio(irq);
1051        u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
1052        u32 pintbit = PINT_BIT(pint_val);
1053        u32 bank = PINT_2_BANK(pint_val);
1054
1055        if (pint_val == IRQ_NOT_AVAIL)
1056                return -ENODEV;
1057
1058        if (type == IRQ_TYPE_PROBE) {
1059                /* only probe unenabled GPIO interrupt lines */
1060                if (test_bit(gpionr, gpio_enabled))
1061                        return 0;
1062                type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
1063        }
1064
1065        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
1066                    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
1067
1068                snprintf(buf, 16, "gpio-irq%d", irq);
1069                ret = bfin_gpio_irq_request(gpionr, buf);
1070                if (ret)
1071                        return ret;
1072
1073                if (__test_and_set_bit(gpionr, gpio_enabled))
1074                        bfin_gpio_irq_prepare(gpionr);
1075
1076        } else {
1077                __clear_bit(gpionr, gpio_enabled);
1078                return 0;
1079        }
1080
1081        if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
1082                pint[bank]->invert_set = pintbit;       /* low or falling edge denoted by one */
1083        else
1084                pint[bank]->invert_clear = pintbit;     /* high or rising edge denoted by zero */
1085
1086        if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
1087            == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
1088                if (gpio_get_value(gpionr))
1089                        pint[bank]->invert_set = pintbit;
1090                else
1091                        pint[bank]->invert_clear = pintbit;
1092        }
1093
1094        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
1095                pint[bank]->edge_set = pintbit;
1096                bfin_set_irq_handler(irq, handle_edge_irq);
1097        } else {
1098                pint[bank]->edge_clear = pintbit;
1099                bfin_set_irq_handler(irq, handle_level_irq);
1100        }
1101
1102        return 0;
1103}
1104
1105#ifdef CONFIG_PM
1106static struct bfin_pm_pint_save save_pint_reg[NR_PINT_SYS_IRQS];
1107static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS];
1108
1109static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
1110{
1111        u32 pint_irq;
1112        u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
1113        u32 bank = PINT_2_BANK(pint_val);
1114
1115        switch (bank) {
1116        case 0:
1117                pint_irq = IRQ_PINT0;
1118                break;
1119        case 2:
1120                pint_irq = IRQ_PINT2;
1121                break;
1122        case 3:
1123                pint_irq = IRQ_PINT3;
1124                break;
1125        case 1:
1126                pint_irq = IRQ_PINT1;
1127                break;
1128#ifdef CONFIG_BF60x
1129        case 4:
1130                pint_irq = IRQ_PINT4;
1131                break;
1132        case 5:
1133                pint_irq = IRQ_PINT5;
1134                break;
1135#endif
1136        default:
1137                return -EINVAL;
1138        }
1139
1140#ifndef SEC_GCTL
1141        bfin_internal_set_wake(pint_irq, state);
1142#endif
1143
1144        return 0;
1145}
1146
1147void bfin_pint_suspend(void)
1148{
1149        u32 bank;
1150
1151        for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
1152                save_pint_reg[bank].mask_set = pint[bank]->mask_set;
1153                save_pint_reg[bank].assign = pint[bank]->assign;
1154                save_pint_reg[bank].edge_set = pint[bank]->edge_set;
1155                save_pint_reg[bank].invert_set = pint[bank]->invert_set;
1156        }
1157}
1158
1159void bfin_pint_resume(void)
1160{
1161        u32 bank;
1162
1163        for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
1164                pint[bank]->mask_set = save_pint_reg[bank].mask_set;
1165                pint[bank]->assign = save_pint_reg[bank].assign;
1166                pint[bank]->edge_set = save_pint_reg[bank].edge_set;
1167                pint[bank]->invert_set = save_pint_reg[bank].invert_set;
1168        }
1169}
1170
1171#ifdef SEC_GCTL
1172static int sec_suspend(void)
1173{
1174        u32 bank;
1175
1176        for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
1177                save_pint_sec_ctl[bank] = bfin_read_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0));
1178        return 0;
1179}
1180
1181static void sec_resume(void)
1182{
1183        u32 bank;
1184
1185        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
1186        udelay(100);
1187        bfin_write_SEC_GCTL(SEC_GCTL_EN);
1188        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1189
1190        for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
1191                bfin_write_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0), save_pint_sec_ctl[bank]);
1192}
1193
1194static struct syscore_ops sec_pm_syscore_ops = {
1195        .suspend = sec_suspend,
1196        .resume = sec_resume,
1197};
1198
1199#endif
1200#else
1201# define bfin_gpio_set_wake NULL
1202#endif
1203
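     /*
      * Chained handler for the PINTx interrupts: mask/ack the parent,
      * walk the bank's pending request bits and dispatch each one to its
      * GPIO IRQ via pint2irq_lut.  The parent is unmasked early when a
      * pending source is edge-configured, otherwise after all pending
      * bits have been handled.
      */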
1204void bfin_demux_gpio_irq(unsigned int inta_irq,
1205                        struct irq_desc *desc)
1206{
1207        u32 bank, pint_val;
1208        u32 request, irq;
1209        u32 level_mask;
1210        int umask = 0;
1211        struct irq_chip *chip = irq_desc_get_chip(desc);
1212
1213        if (chip->irq_mask_ack) {
1214                chip->irq_mask_ack(&desc->irq_data);
1215        } else {
1216                chip->irq_mask(&desc->irq_data);
1217                if (chip->irq_ack)
1218                        chip->irq_ack(&desc->irq_data);
1219        }
1220
1221        switch (inta_irq) {
1222        case IRQ_PINT0:
1223                bank = 0;
1224                break;
1225        case IRQ_PINT2:
1226                bank = 2;
1227                break;
1228        case IRQ_PINT3:
1229                bank = 3;
1230                break;
1231        case IRQ_PINT1:
1232                bank = 1;
1233                break;
1234#ifdef CONFIG_BF60x
1235        case IRQ_PINT4:
1236                bank = 4;
1237                break;
1238        case IRQ_PINT5:
1239                bank = 5;
1240                break;
1241#endif
1242        default:
1243                return;
1244        }
1245
1246        pint_val = bank * NR_PINT_BITS;
1247
1248        request = pint[bank]->request;
1249
1250        level_mask = pint[bank]->edge_set & request;
1251
1252        while (request) {
1253                if (request & 1) {
1254                        irq = pint2irq_lut[pint_val] + SYS_IRQS;
1255                        if (level_mask & PINT_BIT(pint_val)) {
1256                                umask = 1;
1257                                chip->irq_unmask(&desc->irq_data);
1258                        }
1259                        bfin_handle_irq(irq);
1260                }
1261                pint_val++;
1262                request >>= 1;
1263        }
1264
1265        if (!umask)
1266                chip->irq_unmask(&desc->irq_data);
1267}
1268#endif
1269
1270static struct irq_chip bfin_gpio_irqchip = {
1271        .name = "GPIO",
1272        .irq_ack = bfin_gpio_ack_irq,
1273        .irq_mask = bfin_gpio_mask_irq,
1274        .irq_mask_ack = bfin_gpio_mask_ack_irq,
1275        .irq_unmask = bfin_gpio_unmask_irq,
1276        .irq_disable = bfin_gpio_mask_irq,
1277        .irq_enable = bfin_gpio_unmask_irq,
1278        .irq_set_type = bfin_gpio_irq_type,
1279        .irq_startup = bfin_gpio_irq_startup,
1280        .irq_shutdown = bfin_gpio_irq_shutdown,
1281        .irq_set_wake = bfin_gpio_set_wake,
1282};
1283
1284void init_exception_vectors(void)
1285{
1286        /* cannot program in software:
1287         * evt0 - emulation (jtag)
1288         * evt1 - reset
1289         */
1290        bfin_write_EVT2(evt_nmi);
1291        bfin_write_EVT3(trap);
1292        bfin_write_EVT5(evt_ivhw);
1293        bfin_write_EVT6(evt_timer);
1294        bfin_write_EVT7(evt_evt7);
1295        bfin_write_EVT8(evt_evt8);
1296        bfin_write_EVT9(evt_evt9);
1297        bfin_write_EVT10(evt_evt10);
1298        bfin_write_EVT11(evt_evt11);
1299        bfin_write_EVT12(evt_evt12);
1300        bfin_write_EVT13(evt_evt13);
1301        bfin_write_EVT14(evt_evt14);
1302        bfin_write_EVT15(evt_system_call);
1303        CSYNC();
1304}
1305
1306#ifndef SEC_GCTL
1307/*
1308 * This function should be called during kernel startup to initialize
1309 * the BFin IRQ handling routines.
1310 */
1311
1312int __init init_arch_irq(void)
1313{
1314        int irq;
1315        unsigned long ilat = 0;
1316
1317        /*  Disable all the peripheral intrs  - page 4-29 HW Ref manual */
1318#ifdef SIC_IMASK0
1319        bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
1320        bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
1321# ifdef SIC_IMASK2
1322        bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
1323# endif
1324# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
1325        bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
1326        bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
1327# endif
1328#else
1329        bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
1330#endif
1331
1332        local_irq_disable();
1333
1334#if BFIN_GPIO_PINT
1335# ifdef CONFIG_PINTx_REASSIGN
1336        pint[0]->assign = CONFIG_PINT0_ASSIGN;
1337        pint[1]->assign = CONFIG_PINT1_ASSIGN;
1338        pint[2]->assign = CONFIG_PINT2_ASSIGN;
1339        pint[3]->assign = CONFIG_PINT3_ASSIGN;
1340# endif
1341        /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
1342        init_pint_lut();
1343#endif
1344
1345        for (irq = 0; irq <= SYS_IRQS; irq++) {
1346                if (irq <= IRQ_CORETMR)
1347                        irq_set_chip(irq, &bfin_core_irqchip);
1348                else
1349                        irq_set_chip(irq, &bfin_internal_irqchip);
1350
1351                switch (irq) {
1352#if BFIN_GPIO_PINT
1353                case IRQ_PINT0:
1354                case IRQ_PINT1:
1355                case IRQ_PINT2:
1356                case IRQ_PINT3:
1357#elif defined(BF537_FAMILY)
1358                case IRQ_PH_INTA_MAC_RX:
1359                case IRQ_PF_INTA_PG_INTA:
1360#elif defined(BF533_FAMILY)
1361                case IRQ_PROG_INTA:
1362#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
1363                case IRQ_PORTF_INTA:
1364                case IRQ_PORTG_INTA:
1365                case IRQ_PORTH_INTA:
1366#elif defined(CONFIG_BF561)
1367                case IRQ_PROG0_INTA:
1368                case IRQ_PROG1_INTA:
1369                case IRQ_PROG2_INTA:
1370#elif defined(BF538_FAMILY)
1371                case IRQ_PORTF_INTA:
1372#endif
1373                        irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1374                        break;
1375#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1376                case IRQ_MAC_ERROR:
1377                        irq_set_chained_handler(irq,
1378                                                bfin_demux_mac_status_irq);
1379                        break;
1380#endif
1381#if defined(CONFIG_SMP) || defined(CONFIG_ICC)
1382                case IRQ_SUPPLE_0:
1383                case IRQ_SUPPLE_1:
1384                        irq_set_handler(irq, handle_percpu_irq);
1385                        break;
1386#endif
1387
1388#ifdef CONFIG_TICKSOURCE_CORETMR
1389                case IRQ_CORETMR:
1390# ifdef CONFIG_SMP
1391                        irq_set_handler(irq, handle_percpu_irq);
1392# else
1393                        irq_set_handler(irq, handle_simple_irq);
1394# endif
1395                        break;
1396#endif
1397
1398#ifdef CONFIG_TICKSOURCE_GPTMR0
1399                case IRQ_TIMER0:
1400                        irq_set_handler(irq, handle_simple_irq);
1401                        break;
1402#endif
1403
1404                default:
1405#ifdef CONFIG_IPIPE
1406                        irq_set_handler(irq, handle_level_irq);
1407#else
1408                        irq_set_handler(irq, handle_simple_irq);
1409#endif
1410                        break;
1411                }
1412        }
1413
1414        init_mach_irq();
1415
1416#if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
1417        for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
1418                irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
1419                                         handle_level_irq);
1420#endif
1421        /* if configured as edge, then will be changed to do_edge_IRQ */
1422        for (irq = GPIO_IRQ_BASE;
1423                irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1424                irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1425                                         handle_level_irq);
1426        bfin_write_IMASK(0);
1427        CSYNC();
1428        ilat = bfin_read_ILAT();
1429        CSYNC();
1430        bfin_write_ILAT(ilat);
1431        CSYNC();
1432
1433        printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
1434        /* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
1435         * local_irq_enable()
1436         */
1437        program_IAR();
 1438        /* Therefore it's better to set up the IARs before interrupts are enabled */
1439        search_IAR();
1440
1441        /* Enable interrupts IVG7-15 */
1442        bfin_irq_flags |= IMASK_IVG15 |
1443                IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1444                IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1445
1446
1447        /* This implicitly covers ANOMALY_05000171
1448         * Boot-ROM code modifies SICA_IWRx wakeup registers
1449         */
1450#ifdef SIC_IWR0
1451        bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
1452# ifdef SIC_IWR1
1453        /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
1454         * will screw up the bootrom as it relies on MDMA0/1 waking it
1455         * up from IDLE instructions.  See this report for more info:
1456         * http://blackfin.uclinux.org/gf/tracker/4323
1457         */
1458        if (ANOMALY_05000435)
1459                bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
1460        else
1461                bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
1462# endif
1463# ifdef SIC_IWR2
1464        bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
1465# endif
1466#else
1467        bfin_write_SIC_IWR(IWR_DISABLE_ALL);
1468#endif
1469        return 0;
1470}
1471
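     /*
      * Translate a core event vector (IVG7..IVG13) back into a system IRQ
      * by scanning the pending-and-unmasked SIC status bits against the
      * per-IVG slice built by search_IAR(); returns -1 if nothing is
      * pending.
      */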
1472#ifdef CONFIG_DO_IRQ_L1
1473__attribute__((l1_text))
1474#endif
1475static int vec_to_irq(int vec)
1476{
1477        struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1478        struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1479        unsigned long sic_status[3];
1480        if (likely(vec == EVT_IVTMR_P))
1481                return IRQ_CORETMR;
1482#ifdef SIC_ISR
1483        sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1484#else
1485        if (smp_processor_id()) {
1486# ifdef SICB_ISR0
1487                /* This will be optimized out in UP mode. */
1488                sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1489                sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1490# endif
1491        } else {
1492                sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1493                sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1494        }
1495#endif
1496#ifdef SIC_ISR2
1497        sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1498#endif
1499
1500        for (;; ivg++) {
1501                if (ivg >= ivg_stop)
1502                        return -1;
1503#ifdef SIC_ISR
1504                if (sic_status[0] & ivg->isrflag)
1505#else
1506                if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1507#endif
1508                        return ivg->irqno;
1509        }
1510}
1511
1512#else /* SEC_GCTL */
1513
1514/*
1515 * This function should be called during kernel startup to initialize
1516 * the BFin IRQ handling routines.
1517 */
1518
1519int __init init_arch_irq(void)
1520{
1521        int irq;
1522        unsigned long ilat = 0;
1523
1524        bfin_write_SEC_GCTL(SEC_GCTL_RESET);
1525
1526        local_irq_disable();
1527
1528#if BFIN_GPIO_PINT
1529# ifdef CONFIG_PINTx_REASSIGN
1530        pint[0]->assign = CONFIG_PINT0_ASSIGN;
1531        pint[1]->assign = CONFIG_PINT1_ASSIGN;
1532        pint[2]->assign = CONFIG_PINT2_ASSIGN;
1533        pint[3]->assign = CONFIG_PINT3_ASSIGN;
1534        pint[4]->assign = CONFIG_PINT4_ASSIGN;
1535        pint[5]->assign = CONFIG_PINT5_ASSIGN;
1536# endif
1537        /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
1538        init_pint_lut();
1539#endif
1540
1541        for (irq = 0; irq <= SYS_IRQS; irq++) {
1542                if (irq <= IRQ_CORETMR) {
1543                        irq_set_chip_and_handler(irq, &bfin_core_irqchip,
1544                                handle_simple_irq);
1545#if defined(CONFIG_TICKSOURCE_CORETMR) && defined(CONFIG_SMP)
1546                        if (irq == IRQ_CORETMR)
1547                                irq_set_handler(irq, handle_percpu_irq);
1548#endif
1549                } else if (irq >= BFIN_IRQ(21) && irq <= BFIN_IRQ(26)) {
1550                        irq_set_chip(irq, &bfin_sec_irqchip);
1551                        irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1552                } else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
1553                        irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
1554                                handle_percpu_irq);
1555                } else {
1556                        irq_set_chip(irq, &bfin_sec_irqchip);
1557                        if (irq == IRQ_SEC_ERR)
1558                                irq_set_handler(irq, handle_sec_fault);
1559                        else if (irq >= IRQ_C0_DBL_FAULT && irq < CORE_IRQS)
1560                                irq_set_handler(irq, handle_core_fault);
1561                        else
1562                                irq_set_handler(irq, handle_fasteoi_irq);
1563                        __irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
1564                }
1565        }
1566        for (irq = GPIO_IRQ_BASE;
1567                irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1568                irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1569                                        handle_level_irq);
1570
1571        bfin_write_IMASK(0);
1572        CSYNC();
1573        ilat = bfin_read_ILAT();
1574        CSYNC();
1575        bfin_write_ILAT(ilat);
1576        CSYNC();
1577
1578        printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
1579
1580        bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
1581
1582        bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
1583
1584        /* Enable interrupts IVG7-15 */
1585        bfin_irq_flags |= IMASK_IVG15 |
1586            IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1587            IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1588
1589
1590        bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
1591        bfin_sec_enable_sci(BFIN_SYSIRQ(IRQ_WATCH0));
1592        bfin_sec_enable_ssi(BFIN_SYSIRQ(IRQ_WATCH0));
1593        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
1594        udelay(100);
1595        bfin_write_SEC_GCTL(SEC_GCTL_EN);
1596        bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1597        bfin_write_SEC_SCI(1, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1598
1599        init_software_driven_irq();
1600
1601#ifdef CONFIG_PM
1602        register_syscore_ops(&sec_pm_syscore_ops);
1603#endif
1604
1605        return 0;
1606}
1607
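     /*
      * With the SEC, the source ID currently being serviced can be read
      * straight from the core's SEC_CSID register, so no table scan is
      * needed.
      */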
1608#ifdef CONFIG_DO_IRQ_L1
1609__attribute__((l1_text))
1610#endif
1611static int vec_to_irq(int vec)
1612{
1613        if (likely(vec == EVT_IVTMR_P))
1614                return IRQ_CORETMR;
1615
1616        return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID));
1617}
1618#endif  /* SEC_GCTL */
1619
1620#ifdef CONFIG_DO_IRQ_L1
1621__attribute__((l1_text))
1622#endif
1623void do_irq(int vec, struct pt_regs *fp)
1624{
1625        int irq = vec_to_irq(vec);
1626        if (irq == -1)
1627                return;
1628        asm_do_IRQ(irq, fp);
1629}
1630
1631#ifdef CONFIG_IPIPE
1632
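     /*
      * Tell the I-pipe layer at which core priority level (IVG number) a
      * given IRQ is delivered.
      */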
1633int __ipipe_get_irq_priority(unsigned irq)
1634{
1635        int ient, prio;
1636
1637        if (irq <= IRQ_CORETMR)
1638                return irq;
1639
1640#ifdef SEC_GCTL
1641        if (irq >= BFIN_IRQ(0))
1642                return IVG11;
1643#else
1644        for (ient = 0; ient < NR_PERI_INTS; ient++) {
1645                struct ivgx *ivg = ivg_table + ient;
1646                if (ivg->irqno == irq) {
1647                        for (prio = 0; prio <= IVG13-IVG7; prio++) {
1648                                if (ivg7_13[prio].ifirst <= ivg &&
1649                                    ivg7_13[prio].istop > ivg)
1650                                        return IVG7 + prio;
1651                        }
1652                }
1653        }
1654#endif
1655
1656        return IVG15;
1657}
1658
1659/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
1660#ifdef CONFIG_DO_IRQ_L1
1661__attribute__((l1_text))
1662#endif
1663asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1664{
1665        struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
1666        struct ipipe_domain *this_domain = __ipipe_current_domain;
1667        int irq, s = 0;
1668
1669        irq = vec_to_irq(vec);
1670        if (irq == -1)
1671                return 0;
1672
1673        if (irq == IRQ_SYSTMR) {
1674#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
1675                bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
1676#endif
1677                /* This is basically what we need from the register frame. */
1678                __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
1679                __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
1680                if (this_domain != ipipe_root_domain)
1681                        __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
1682                else
1683                        __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
1684        }
1685
1686        /*
1687         * We don't want Linux interrupt handlers to run at the
1688         * current core priority level (i.e. < EVT15), since this
1689         * might delay other interrupts handled by a high priority
1690         * domain. Here is what we do instead:
1691         *
1692         * - we raise the SYNCDEFER bit to prevent
1693         * __ipipe_handle_irq() to sync the pipeline for the root
1694         * stage for the incoming interrupt. Upon return, that IRQ is
1695         * pending in the interrupt log.
1696         *
1697         * - we raise the TIF_IRQ_SYNC bit for the current thread, so
1698         * that _schedule_and_signal_from_int will eventually sync the
1699         * pipeline from EVT15.
1700         */
1701        if (this_domain == ipipe_root_domain) {
1702                s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1703                barrier();
1704        }
1705
1706        ipipe_trace_irq_entry(irq);
1707        __ipipe_handle_irq(irq, regs);
1708        ipipe_trace_irq_exit(irq);
1709
1710        if (user_mode(regs) &&
1711            !ipipe_test_foreign_stack() &&
1712            (current->ipipe_flags & PF_EVTRET) != 0) {
1713                /*
 1714                 * Testing for user_mode() does NOT fully eliminate
1715                 * foreign stack contexts, because of the forged
1716                 * interrupt returns we do through
1717                 * __ipipe_call_irqtail. In that case, we might have
1718                 * preempted a foreign stack context in a high
1719                 * priority domain, with a single interrupt level now
1720                 * pending after the irqtail unwinding is done. In
1721                 * which case user_mode() is now true, and the event
1722                 * gets dispatched spuriously.
1723                 */
1724                current->ipipe_flags &= ~PF_EVTRET;
1725                __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
1726        }
1727
1728        if (this_domain == ipipe_root_domain) {
1729                set_thread_flag(TIF_IRQ_SYNC);
1730                if (!s) {
1731                        __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1732                        return !test_bit(IPIPE_STALL_FLAG, &p->status);
1733                }
1734        }
1735
1736        return 0;
1737}
1738
1739#endif /* CONFIG_IPIPE */
1740