linux/arch/mips/bcm63xx/irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>


static DEFINE_SPINLOCK(ipic_lock);
static DEFINE_SPINLOCK(epic_lock);

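/*
 * Per-CPU PERF IRQ status/mask register addresses, the 32- or 64-bit
 * dispatch/mask/unmask helpers selected at init time, and the external
 * IRQ setup (count, cascade range, PERF config registers).
 */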
static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
static void (*dispatch_internal)(int cpu);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);


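/*
 * External IRQs 0-3 are configured through the first PERF external IRQ
 * config register; higher-numbered ones (BCM6368 only) use the second.
 */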
static inline u32 get_ext_irq_perf_reg(int irq)
{
        if (irq < 4)
                return ext_irq_cfg_reg1;
        return ext_irq_cfg_reg2;
}

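/*
 * When external IRQs are cascaded through the internal controller, status
 * bits falling in the cascade range are forwarded to their external IRQ
 * numbers; every other bit maps directly to an internal IRQ number.
 */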
static inline void handle_internal(int intbit)
{
        if (is_ext_irq_cascaded &&
            intbit >= ext_irq_start && intbit <= ext_irq_end)
                do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
        else
                do_IRQ(intbit + IRQ_INTERNAL_BASE);
}

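/*
 * An IRQ is only unmasked on a given CPU if that CPU is online and, on SMP,
 * allowed by the requested cpumask (or by the IRQ's current affinity when
 * no mask is passed).
 */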
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
                                     const struct cpumask *m)
{
        bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
        if (m)
                enable &= cpu_isset(cpu, *m);
        else if (irqd_affinity_was_set(d))
                enable &= cpu_isset(cpu, *d->affinity);
#endif
        return enable;
}

/*
 * Dispatch internal device IRQs (UART, Ethernet, watchdog, ...).  No
 * interrupt is prioritized over another: a static per-CPU counter resumes
 * the scan where it stopped the last time this function ran.
 */

#define BUILD_IPIC_INTERNAL(width)                                      \
void __dispatch_internal_##width(int cpu)                               \
{                                                                       \
        u32 pending[width / 32];                                        \
        unsigned int src, tgt;                                          \
        bool irqs_pending = false;                                      \
        static unsigned int i[2];                                       \
        unsigned int *next = &i[cpu];                                   \
        unsigned long flags;                                            \
                                                                        \
        /* read registers in reverse order */                           \
        spin_lock_irqsave(&ipic_lock, flags);                           \
        for (src = 0, tgt = (width / 32); src < (width / 32); src++) {  \
                u32 val;                                                \
                                                                        \
                val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
                val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
                pending[--tgt] = val;                                   \
                                                                        \
                if (val)                                                \
                        irqs_pending = true;                            \
        }                                                               \
        spin_unlock_irqrestore(&ipic_lock, flags);                      \
                                                                        \
        if (!irqs_pending)                                              \
                return;                                                 \
                                                                        \
        while (1) {                                                     \
                unsigned int to_call = *next;                           \
                                                                        \
                *next = (*next + 1) & (width - 1);                      \
                if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {  \
                        handle_internal(to_call);                       \
                        break;                                          \
                }                                                       \
        }                                                               \
}                                                                       \
                                                                        \
static void __internal_irq_mask_##width(struct irq_data *d)             \
{                                                                       \
        u32 val;                                                        \
        unsigned irq = d->irq - IRQ_INTERNAL_BASE;                      \
        unsigned reg = (irq / 32) ^ (width/32 - 1);                     \
        unsigned bit = irq & 0x1f;                                      \
        unsigned long flags;                                            \
        int cpu;                                                        \
                                                                        \
        spin_lock_irqsave(&ipic_lock, flags);                           \
        for_each_present_cpu(cpu) {                                     \
                if (!irq_mask_addr[cpu])                                \
                        break;                                          \
                                                                        \
                val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
                val &= ~(1 << bit);                                     \
                bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
        }                                                               \
        spin_unlock_irqrestore(&ipic_lock, flags);                      \
}                                                                       \
                                                                        \
static void __internal_irq_unmask_##width(struct irq_data *d,           \
                                          const struct cpumask *m)      \
{                                                                       \
        u32 val;                                                        \
        unsigned irq = d->irq - IRQ_INTERNAL_BASE;                      \
        unsigned reg = (irq / 32) ^ (width/32 - 1);                     \
        unsigned bit = irq & 0x1f;                                      \
        unsigned long flags;                                            \
        int cpu;                                                        \
                                                                        \
        spin_lock_irqsave(&ipic_lock, flags);                           \
        for_each_present_cpu(cpu) {                                     \
                if (!irq_mask_addr[cpu])                                \
                        break;                                          \
                                                                        \
                val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
                if (enable_irq_for_cpu(cpu, d, m))                      \
                        val |= (1 << bit);                              \
                else                                                    \
                        val &= ~(1 << bit);                             \
                bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
        }                                                               \
        spin_unlock_irqrestore(&ipic_lock, flags);                      \
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);

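/*
 * CP0 dispatch: IP7 (the MIPS CPU timer) and the software interrupts
 * IP0/IP1 are handled directly, IP2 cascades the first bank of internal
 * status/mask registers.  IP3 cascades the second bank when external IRQs
 * are routed through the internal controller; otherwise IP3-IP6 are the
 * external IRQ lines themselves.
 */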
asmlinkage void plat_irq_dispatch(void)
{
        u32 cause;

        do {
                cause = read_c0_cause() & read_c0_status() & ST0_IM;

                if (!cause)
                        break;

                if (cause & CAUSEF_IP7)
                        do_IRQ(7);
                if (cause & CAUSEF_IP0)
                        do_IRQ(0);
                if (cause & CAUSEF_IP1)
                        do_IRQ(1);
                if (cause & CAUSEF_IP2)
                        dispatch_internal(0);
                if (is_ext_irq_cascaded) {
                        if (cause & CAUSEF_IP3)
                                dispatch_internal(1);
                } else {
                        if (cause & CAUSEF_IP3)
                                do_IRQ(IRQ_EXT_0);
                        if (cause & CAUSEF_IP4)
                                do_IRQ(IRQ_EXT_1);
                        if (cause & CAUSEF_IP5)
                                do_IRQ(IRQ_EXT_2);
                        if (cause & CAUSEF_IP6)
                                do_IRQ(IRQ_EXT_3);
                }
        } while (1);
}

/*
 * Internal IRQ operations: mask/unmask only touch the PERF IRQ mask
 * register.
 */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
        internal_irq_mask(d);
}

static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
        internal_irq_unmask(d, NULL);
}

/*
 * External IRQ operations: mask/unmask and acknowledge (clear) through the
 * PERF external IRQ control register.
 */
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
        unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
        u32 reg, regaddr;
        unsigned long flags;

        regaddr = get_ext_irq_perf_reg(irq);
        spin_lock_irqsave(&epic_lock, flags);
        reg = bcm_perf_readl(regaddr);

        if (BCMCPU_IS_6348())
                reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
        else
                reg &= ~EXTIRQ_CFG_MASK(irq % 4);

        bcm_perf_writel(reg, regaddr);
        spin_unlock_irqrestore(&epic_lock, flags);

        if (is_ext_irq_cascaded)
                internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
}

static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
        unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
        u32 reg, regaddr;
        unsigned long flags;

        regaddr = get_ext_irq_perf_reg(irq);
        spin_lock_irqsave(&epic_lock, flags);
        reg = bcm_perf_readl(regaddr);

        if (BCMCPU_IS_6348())
                reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
        else
                reg |= EXTIRQ_CFG_MASK(irq % 4);

        bcm_perf_writel(reg, regaddr);
        spin_unlock_irqrestore(&epic_lock, flags);

        if (is_ext_irq_cascaded)
                internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
                                    NULL);
}

static void bcm63xx_external_irq_clear(struct irq_data *d)
{
        unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
        u32 reg, regaddr;
        unsigned long flags;

        regaddr = get_ext_irq_perf_reg(irq);
        spin_lock_irqsave(&epic_lock, flags);
        reg = bcm_perf_readl(regaddr);

        if (BCMCPU_IS_6348())
                reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
        else
                reg |= EXTIRQ_CFG_CLEAR(irq % 4);

        bcm_perf_writel(reg, regaddr);
        spin_unlock_irqrestore(&epic_lock, flags);
}

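/*
 * The trigger type is encoded in three per-IRQ bits of the external IRQ
 * config register: levelsense (level vs. edge), sense (active-high/rising
 * vs. active-low/falling) and bothedge.
 */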
static int bcm63xx_external_irq_set_type(struct irq_data *d,
                                         unsigned int flow_type)
{
        unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
        u32 reg, regaddr;
        int levelsense, sense, bothedge;
        unsigned long flags;

        flow_type &= IRQ_TYPE_SENSE_MASK;

        if (flow_type == IRQ_TYPE_NONE)
                flow_type = IRQ_TYPE_LEVEL_LOW;

        levelsense = sense = bothedge = 0;
        switch (flow_type) {
        case IRQ_TYPE_EDGE_BOTH:
                bothedge = 1;
                break;

        case IRQ_TYPE_EDGE_RISING:
                sense = 1;
                break;

        case IRQ_TYPE_EDGE_FALLING:
                break;

        case IRQ_TYPE_LEVEL_HIGH:
                levelsense = 1;
                sense = 1;
                break;

        case IRQ_TYPE_LEVEL_LOW:
                levelsense = 1;
                break;

        default:
                printk(KERN_ERR "bogus flow type combination given!\n");
                return -EINVAL;
        }

        regaddr = get_ext_irq_perf_reg(irq);
        spin_lock_irqsave(&epic_lock, flags);
        reg = bcm_perf_readl(regaddr);
        irq %= 4;

        switch (bcm63xx_get_cpu_id()) {
        case BCM6348_CPU_ID:
                if (levelsense)
                        reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
                else
                        reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
                if (sense)
                        reg |= EXTIRQ_CFG_SENSE_6348(irq);
                else
                        reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
                if (bothedge)
                        reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
                else
                        reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
                break;

        case BCM3368_CPU_ID:
        case BCM6328_CPU_ID:
        case BCM6338_CPU_ID:
        case BCM6345_CPU_ID:
        case BCM6358_CPU_ID:
        case BCM6362_CPU_ID:
        case BCM6368_CPU_ID:
                if (levelsense)
                        reg |= EXTIRQ_CFG_LEVELSENSE(irq);
                else
                        reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
                if (sense)
                        reg |= EXTIRQ_CFG_SENSE(irq);
                else
                        reg &= ~EXTIRQ_CFG_SENSE(irq);
                if (bothedge)
                        reg |= EXTIRQ_CFG_BOTHEDGE(irq);
                else
                        reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
                break;
        default:
                BUG();
        }

        bcm_perf_writel(reg, regaddr);
        spin_unlock_irqrestore(&epic_lock, flags);

        irqd_set_trigger_type(d, flow_type);
        if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
                __irq_set_handler_locked(d->irq, handle_level_irq);
        else
                __irq_set_handler_locked(d->irq, handle_edge_irq);

        return IRQ_SET_MASK_OK_NOCOPY;
}

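/*
 * Changing affinity simply re-runs the unmask path with the new cpumask,
 * which rewrites the per-CPU enable bits; a disabled IRQ is left untouched.
 */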
#ifdef CONFIG_SMP
static int bcm63xx_internal_set_affinity(struct irq_data *data,
                                         const struct cpumask *dest,
                                         bool force)
{
        if (!irqd_irq_disabled(data))
                internal_irq_unmask(data, dest);

        return 0;
}
#endif

static struct irq_chip bcm63xx_internal_irq_chip = {
        .name           = "bcm63xx_ipic",
        .irq_mask       = bcm63xx_internal_irq_mask,
        .irq_unmask     = bcm63xx_internal_irq_unmask,
};

static struct irq_chip bcm63xx_external_irq_chip = {
        .name           = "bcm63xx_epic",
        .irq_ack        = bcm63xx_external_irq_clear,

        .irq_mask       = bcm63xx_external_irq_mask,
        .irq_unmask     = bcm63xx_external_irq_unmask,

        .irq_set_type   = bcm63xx_external_irq_set_type,
};

static struct irqaction cpu_ip2_cascade_action = {
        .handler        = no_action,
        .name           = "cascade_ip2",
        .flags          = IRQF_NO_THREAD,
};

#ifdef CONFIG_SMP
static struct irqaction cpu_ip3_cascade_action = {
        .handler        = no_action,
        .name           = "cascade_ip3",
        .flags          = IRQF_NO_THREAD,
};
#endif

static struct irqaction cpu_ext_cascade_action = {
        .handler        = no_action,
        .name           = "cascade_extirq",
        .flags          = IRQF_NO_THREAD,
};

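/*
 * Pick the per-SoC PERF register offsets, the number of external IRQs and
 * their cascade range, then select the 32- or 64-bit internal dispatch,
 * mask and unmask helpers to match.
 */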
static void bcm63xx_init_irq(void)
{
        int irq_bits;

        irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
        irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
        irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
        irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

        switch (bcm63xx_get_cpu_id()) {
        case BCM3368_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
                irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
                irq_stat_addr[1] = 0;
                irq_mask_addr[1] = 0;
                irq_bits = 32;
                ext_irq_count = 4;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
                break;
        case BCM6328_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
                irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
                irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
                irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
                irq_bits = 64;
                ext_irq_count = 4;
                is_ext_irq_cascaded = 1;
                ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
                ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
                break;
        case BCM6338_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
                irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
                irq_stat_addr[1] = 0;
                irq_mask_addr[1] = 0;
                irq_bits = 32;
                ext_irq_count = 4;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
                break;
        case BCM6345_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
                irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
                irq_stat_addr[1] = 0;
                irq_mask_addr[1] = 0;
                irq_bits = 32;
                ext_irq_count = 4;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
                break;
        case BCM6348_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
                irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
                irq_stat_addr[1] = 0;
                irq_mask_addr[1] = 0;
                irq_bits = 32;
                ext_irq_count = 4;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
                break;
        case BCM6358_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
                irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
                irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
                irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
                irq_bits = 32;
                ext_irq_count = 4;
                is_ext_irq_cascaded = 1;
                ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
                ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
                break;
        case BCM6362_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
                irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
                irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
                irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
                irq_bits = 64;
                ext_irq_count = 4;
                is_ext_irq_cascaded = 1;
                ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
                ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
                break;
        case BCM6368_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
                irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
                irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
                irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
                irq_bits = 64;
                ext_irq_count = 6;
                is_ext_irq_cascaded = 1;
                ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
                ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
                ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
                break;
        default:
                BUG();
        }

        if (irq_bits == 32) {
                dispatch_internal = __dispatch_internal_32;
                internal_irq_mask = __internal_irq_mask_32;
                internal_irq_unmask = __internal_irq_unmask_32;
        } else {
                dispatch_internal = __dispatch_internal_64;
                internal_irq_mask = __internal_irq_mask_64;
                internal_irq_unmask = __internal_irq_unmask_64;
        }
}

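/*
 * Register the MIPS CPU interrupt controller, attach the internal and
 * external irq_chips, then wire up the IP2 cascade (plus IP3 or the
 * dedicated external lines, depending on the SoC).
 */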
void __init arch_init_irq(void)
{
        int i;

        bcm63xx_init_irq();
        mips_cpu_irq_init();
        for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
                irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
                                         handle_level_irq);

        for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
                irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
                                         handle_edge_irq);

        if (!is_ext_irq_cascaded) {
                for (i = 3; i < 3 + ext_irq_count; ++i)
                        setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
        }

        setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
#ifdef CONFIG_SMP
        if (is_ext_irq_cascaded) {
                setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action);
                bcm63xx_internal_irq_chip.irq_set_affinity =
                        bcm63xx_internal_set_affinity;

                cpumask_clear(irq_default_affinity);
                cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
        }
#endif
}