linux/arch/mips/cavium-octeon/octeon-irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2014 Cavium, Inc.
 */

#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);

struct octeon_irq_ciu_domain_data {
        int num_sum;  /* number of sum registers (2 or 3). */
};

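/*
 * Reverse map from (CIU line, bit) to the Linux irq number.  Eight
 * lines of 64 bits each cover CIU0/CIU1 and the SUM2 extension; a
 * zero entry means "not mapped".
 */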
static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];

struct octeon_ciu_chip_data {
        union {
                struct {                /* only used for ciu3 */
                        u64 ciu3_addr;
                        unsigned int intsn;
                };
                struct {                /* only used for ciu/ciu2 */
                        u8 line;
                        u8 bit;
                        u8 gpio_line;
                };
        };
        int current_cpu;        /* Next CPU expected to take this irq */
};

struct octeon_core_chip_data {
        struct mutex core_irq_mutex;
        bool current_en;
        bool desired_en;
        u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
                                      struct irq_chip *chip,
                                      irq_flow_handler_t handler)
{
        struct octeon_ciu_chip_data *cd;

        cd = kzalloc(sizeof(*cd), GFP_KERNEL);
        if (!cd)
                return -ENOMEM;

        irq_set_chip_and_handler(irq, chip, handler);

        cd->line = line;
        cd->bit = bit;
        cd->gpio_line = gpio_line;

        irq_set_chip_data(irq, cd);
        octeon_irq_ciu_to_irq[line][bit] = irq;
        return 0;
}

static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
        struct irq_data *data = irq_get_irq_data(irq);
        struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

        irq_set_chip_data(irq, NULL);
        kfree(cd);
}

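/*
 * Note: throughout this file a CIU source is named by the pair
 * (line, bit), and the irq_domain hwirq encoding packs that pair as
 * (line << 6) | bit.  For example line 1, bit 17 (USB1) is hwirq 81.
 */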
static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
                                        int irq, int line, int bit)
{
        return irq_domain_associate(domain, irq, line << 6 | bit);
}

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
        return cpu_number_map(coreid);
#else
        return smp_processor_id();
#endif
}

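/*
 * The eight MIPS core interrupt lines sit in bits 8..15 (IM0-IM7) of
 * the CP0 Status register, hence the "0x100 << bit" masks below.  The
 * matching Cause bits must be cleared by software only for the two
 * software interrupts (cd->bit 0 and 1).
 */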
static void octeon_irq_core_ack(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
        unsigned int bit = cd->bit;

        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << cd->bit);
}

static void octeon_irq_core_set_enable_local(void *arg)
{
        struct irq_data *data = arg;
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
        unsigned int mask = 0x100 << cd->bit;

        /*
         * Interrupts are already disabled, so these are atomic.
         */
        if (cd->desired_en)
                set_c0_status(mask);
        else
                clear_c0_status(mask);
}

static void octeon_irq_core_disable(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
        cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
        cd->desired_en = true;
}

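/*
 * irq_enable/irq_disable above only record the desired state; the
 * bus_lock/bus_sync_unlock pair below implements the "slow bus"
 * protocol that broadcasts the change to every core via on_each_cpu()
 * when the lock is released.
 */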
static void octeon_irq_core_bus_lock(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

        mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
        struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

        if (cd->desired_en != cd->current_en) {
                on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

                cd->current_en = cd->desired_en;
        }

        mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
        .name = "Core",
        .irq_enable = octeon_irq_core_enable,
        .irq_disable = octeon_irq_core_disable,
        .irq_ack = octeon_irq_core_ack,
        .irq_eoi = octeon_irq_core_eoi,
        .irq_bus_lock = octeon_irq_core_bus_lock,
        .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

        .irq_cpu_online = octeon_irq_core_eoi,
        .irq_cpu_offline = octeon_irq_core_ack,
        .flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static void __init octeon_irq_init_core(void)
{
        int i;
        int irq;
        struct octeon_core_chip_data *cd;

        for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
                cd = &octeon_irq_core_chip_data[i];
                cd->current_en = false;
                cd->desired_en = false;
                cd->bit = i;
                mutex_init(&cd->core_irq_mutex);

                irq = OCTEON_IRQ_SW0 + i;
                irq_set_chip_data(irq, cd);
                irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
                                         handle_percpu_irq);
        }
}

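/*
 * Pick the CPU that should take the next interrupt: round-robin over
 * the online CPUs in the affinity mask, starting just after the CPU
 * that took the interrupt last (cd->current_cpu).
 */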
static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
        int cpu;
        struct cpumask *mask = irq_data_get_affinity_mask(data);
        int weight = cpumask_weight(mask);
        struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

        if (weight > 1) {
                cpu = cd->current_cpu;
                for (;;) {
                        cpu = cpumask_next(cpu, mask);
                        if (cpu >= nr_cpu_ids) {
                                cpu = -1;
                                continue;
                        } else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
                                break;
                        }
                }
        } else if (weight == 1) {
                cpu = cpumask_first(mask);
        } else {
                cpu = smp_processor_id();
        }
        cd->current_cpu = cpu;
        return cpu;
#else
        return smp_processor_id();
#endif
}

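/*
 * On chips without the W1S/W1C register aliases the enable registers
 * must be read-modify-written, so a per-CPU mirror of each enable
 * word is kept and updated under a per-CPU raw spinlock; the mirror
 * is what gets written back to the hardware.
 */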
static void octeon_irq_ciu_enable(struct irq_data *data)
{
        int cpu = next_cpu_for_irq(data);
        int coreid = octeon_coreid_for_cpu(cpu);
        unsigned long *pen;
        unsigned long flags;
        struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

        cd = irq_data_get_irq_chip_data(data);

        raw_spin_lock_irqsave(lock, flags);
        if (cd->line == 0) {
                pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
        } else {
                pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
                __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
        }
        raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
        unsigned long *pen;
        unsigned long flags;
        struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

        cd = irq_data_get_irq_chip_data(data);

        raw_spin_lock_irqsave(lock, flags);
        if (cd->line == 0) {
                pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
                __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();
                cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
        } else {
                pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
                __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();
                cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
        }
        raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
        unsigned long *pen;
        unsigned long flags;
        struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

        cd = irq_data_get_irq_chip_data(data);

        raw_spin_lock_irqsave(lock, flags);
        if (cd->line == 0) {
                pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
                __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * writing the enable register.
                 */
                wmb();
                cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
        } else {
                pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
                __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * writing the enable register.
                 */
                wmb();
                cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
        }
        raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
        unsigned long flags;
        unsigned long *pen;
        int cpu;
        struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock;

        cd = irq_data_get_irq_chip_data(data);

        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
                if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

                raw_spin_lock_irqsave(lock, flags);
                __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * writing the enable register.
                 */
                wmb();
                if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
                raw_spin_unlock_irqrestore(lock, flags);
        }
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
        unsigned long flags;
        unsigned long *pen;
        int cpu;
        struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock;

        cd = irq_data_get_irq_chip_data(data);

        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
                if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

                raw_spin_lock_irqsave(lock, flags);
                __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();
                if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
                raw_spin_unlock_irqrestore(lock, flags);
        }
}

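/*
 * The ..._W1S/..._W1C register aliases set or clear exactly the bits
 * written as 1 and leave the rest untouched, so the enable registers
 * can be updated without a read-modify-write cycle or a lock.
 */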
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
        u64 mask;
        int cpu = next_cpu_for_irq(data);
        struct octeon_ciu_chip_data *cd;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd->bit);

        /*
         * Called under the desc lock, so these should never get out
         * of sync.
         */
        if (cd->line == 0) {
                int index = octeon_coreid_for_cpu(cpu) * 2;
                set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
        } else {
                int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
}

/*
 * Enable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
        u64 mask;
        int cpu = next_cpu_for_irq(data);
        int index = octeon_coreid_for_cpu(cpu);
        struct octeon_ciu_chip_data *cd;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd->bit);

        cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}

/*
 * Disable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
        u64 mask;
        int cpu = next_cpu_for_irq(data);
        int index = octeon_coreid_for_cpu(cpu);
        struct octeon_ciu_chip_data *cd;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd->bit);

        cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}

static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
        u64 mask;
        int cpu = next_cpu_for_irq(data);
        int index = octeon_coreid_for_cpu(cpu);
        struct octeon_ciu_chip_data *cd;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd->bit);

        cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}

static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
        int cpu;
        struct octeon_ciu_chip_data *cd;
        u64 mask;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd->bit);

        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);

                cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
        }
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
        u64 mask;
        struct octeon_ciu_chip_data *cd;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd->bit);

        if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
                set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
        } else {
                int index = cvmx_get_core_num() * 2 + 1;
                set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
        u64 mask;
        struct octeon_ciu_chip_data *cd;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd->bit);

        if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
                clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        } else {
                int index = cvmx_get_core_num() * 2 + 1;
                clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
        u64 mask;
        struct octeon_ciu_chip_data *cd;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd->bit);

        if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
                cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
        } else {
                cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
        }
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
        int cpu;
        u64 mask;
        struct octeon_ciu_chip_data *cd;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd->bit);

        if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2;
                        clear_bit(cd->bit,
                                &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
                }
        } else {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                        clear_bit(cd->bit,
                                &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
                }
        }
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
        int cpu;
        u64 mask;
        struct octeon_ciu_chip_data *cd;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd->bit);

        if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2;
                        set_bit(cd->bit,
                                &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                }
        } else {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                        set_bit(cd->bit,
                                &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                }
        }
}

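/*
 * Program GPIO_BIT_CFG(n) from the irq trigger type: INT_TYPE selects
 * edge (1) versus level (0) sensing, and RX_XOR inverts the input so
 * that falling-edge/low-level triggers look rising/high internally.
 */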
static void octeon_irq_gpio_setup(struct irq_data *data)
{
        union cvmx_gpio_bit_cfgx cfg;
        struct octeon_ciu_chip_data *cd;
        u32 t = irqd_get_trigger_type(data);

        cd = irq_data_get_irq_chip_data(data);

        cfg.u64 = 0;
        cfg.s.int_en = 1;
        cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
        cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

        /* 140 ns glitch filter */
        cfg.s.fil_cnt = 7;
        cfg.s.fil_sel = 3;

        cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
        octeon_irq_gpio_setup(data);
        octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
        octeon_irq_gpio_setup(data);
        octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
        irqd_set_trigger_type(data, t);
        octeon_irq_gpio_setup(data);

        if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
                irq_set_handler_locked(data, handle_edge_irq);
        else
                irq_set_handler_locked(data, handle_level_irq);

        return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
        struct octeon_ciu_chip_data *cd;

        cd = irq_data_get_irq_chip_data(data);
        cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

        octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
        struct octeon_ciu_chip_data *cd;

        cd = irq_data_get_irq_chip_data(data);
        cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

        octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
        struct octeon_ciu_chip_data *cd;
        u64 mask;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << (cd->gpio_line);

        cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}

#ifdef CONFIG_SMP

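/*
 * When a CPU goes offline, any irq targeted at it is migrated: the
 * dying CPU is dropped from a multi-CPU affinity mask, otherwise the
 * irq is moved to the first online CPU.
 */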
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
        int cpu = smp_processor_id();
        cpumask_t new_affinity;
        struct cpumask *mask = irq_data_get_affinity_mask(data);

        if (!cpumask_test_cpu(cpu, mask))
                return;

        if (cpumask_weight(mask) > 1) {
                /*
                 * It has affinity to multiple CPUs, so just remove
                 * this CPU from the affinity set.
                 */
                cpumask_copy(&new_affinity, mask);
                cpumask_clear_cpu(cpu, &new_affinity);
        } else {
                /* Otherwise, put it on the lowest numbered online CPU. */
                cpumask_clear(&new_affinity);
                cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
        }
        irq_set_affinity_locked(data, &new_affinity, false);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
                                       const struct cpumask *dest, bool force)
{
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        unsigned long flags;
        struct octeon_ciu_chip_data *cd;
        unsigned long *pen;
        raw_spinlock_t *lock;

        cd = irq_data_get_irq_chip_data(data);

        /*
         * For non-v2 CIU, we will allow only single CPU affinity.
         * This removes the need to do locking in the .ack/.eoi
         * functions.
         */
        if (cpumask_weight(dest) != 1)
                return -EINVAL;

        if (!enable_one)
                return 0;

        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);

                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
                raw_spin_lock_irqsave(lock, flags);

                if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = false;
                        __set_bit(cd->bit, pen);
                } else {
                        __clear_bit(cd->bit, pen);
                }
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();

                if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

                raw_spin_unlock_irqrestore(lock, flags);
        }
        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
                                          const struct cpumask *dest,
                                          bool force)
{
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        u64 mask;
        struct octeon_ciu_chip_data *cd;

        if (!enable_one)
                return 0;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << cd->bit;

        if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                        int index = octeon_coreid_for_cpu(cpu) * 2;
                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = false;
                                set_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                        } else {
                                clear_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
                        }
                }
        } else {
                for_each_online_cpu(cpu) {
                        unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = false;
                                set_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                        } else {
                                clear_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
                        }
                }
        }
        return 0;
}

static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
                                            const struct cpumask *dest,
                                            bool force)
{
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        u64 mask;
        struct octeon_ciu_chip_data *cd;

        if (!enable_one)
                return 0;

        cd = irq_data_get_irq_chip_data(data);
        mask = 1ull << cd->bit;

        for_each_online_cpu(cpu) {
                int index = octeon_coreid_for_cpu(cpu);

                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = false;
                        cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
                } else {
                        cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
                }
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
        .irq_mask = octeon_irq_ciu_disable_local_v2,
        .irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
        .irq_ack = octeon_irq_ciu_ack,
        .irq_mask = octeon_irq_ciu_disable_local_v2,
        .irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable_sum2,
        .irq_disable = octeon_irq_ciu_disable_all_sum2,
        .irq_mask = octeon_irq_ciu_disable_local_sum2,
        .irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable_sum2,
        .irq_disable = octeon_irq_ciu_disable_all_sum2,
        .irq_ack = octeon_irq_ciu_ack_sum2,
        .irq_mask = octeon_irq_ciu_disable_local_sum2,
        .irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable,
        .irq_disable = octeon_irq_ciu_disable_all,
        .irq_mask = octeon_irq_ciu_disable_local,
        .irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable,
        .irq_disable = octeon_irq_ciu_disable_all,
        .irq_ack = octeon_irq_ciu_ack,
        .irq_mask = octeon_irq_ciu_disable_local,
        .irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
        .name = "CIU-M",
        .irq_enable = octeon_irq_ciu_enable_all_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
        .irq_ack = octeon_irq_ciu_disable_local_v2,
        .irq_eoi = octeon_irq_ciu_enable_local_v2,

        .irq_cpu_online = octeon_irq_ciu_enable_local_v2,
        .irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
        .flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
        .name = "CIU-M",
        .irq_enable = octeon_irq_ciu_enable_all,
        .irq_disable = octeon_irq_ciu_disable_all,
        .irq_ack = octeon_irq_ciu_disable_local,
        .irq_eoi = octeon_irq_ciu_enable_local,

        .irq_cpu_online = octeon_irq_ciu_enable_local,
        .irq_cpu_offline = octeon_irq_ciu_disable_local,
        .flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
        .name = "CIU-GPIO",
        .irq_enable = octeon_irq_ciu_enable_gpio_v2,
        .irq_disable = octeon_irq_ciu_disable_gpio_v2,
        .irq_ack = octeon_irq_ciu_gpio_ack,
        .irq_mask = octeon_irq_ciu_disable_local_v2,
        .irq_unmask = octeon_irq_ciu_enable_v2,
        .irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
        .flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
        .name = "CIU-GPIO",
        .irq_enable = octeon_irq_ciu_enable_gpio,
        .irq_disable = octeon_irq_ciu_disable_gpio,
        .irq_mask = octeon_irq_ciu_disable_local,
        .irq_unmask = octeon_irq_ciu_enable,
        .irq_ack = octeon_irq_ciu_gpio_ack,
        .irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity,
        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
        .flags = IRQCHIP_SET_TYPE_MASKED,
};

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
        unsigned long flags;
        unsigned long *pen;
        int coreid = data->irq - OCTEON_IRQ_WDOG0;      /* Bit 0-63 of EN1 */
        int cpu = octeon_cpu_for_coreid(coreid);
        raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

        raw_spin_lock_irqsave(lock, flags);
        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
        __set_bit(coreid, pen);
        /*
         * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
         * the irq.
         */
        wmb();
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
        raw_spin_unlock_irqrestore(lock, flags);
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
        int coreid = data->irq - OCTEON_IRQ_WDOG0;
        int cpu = octeon_cpu_for_coreid(coreid);

        set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
        .name = "CIU-W",
        .irq_enable = octeon_irq_ciu1_wd_enable_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
        .irq_mask = octeon_irq_ciu_disable_local_v2,
        .irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
        .name = "CIU-W",
        .irq_enable = octeon_irq_ciu_wd_enable,
        .irq_disable = octeon_irq_ciu_disable_all,
        .irq_mask = octeon_irq_ciu_disable_local,
        .irq_unmask = octeon_irq_ciu_enable_local,
};

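/*
 * Only a handful of CIU sources (GMX/IPD DRP, the timers, MPI and
 * PTP) are true edge/pulse events that need an ack; everything else
 * is level triggered.
 */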
static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
        bool edge = false;

        if (line == 0)
                switch (bit) {
                case 48 ... 49: /* GMX DRP */
                case 50: /* IPD_DRP */
                case 52 ... 55: /* Timers */
                case 58: /* MPI */
                        edge = true;
                        break;
                default:
                        break;
                }
        else /* line == 1 */
                switch (bit) {
                case 47: /* PTP */
                        edge = true;
                        break;
                default:
                        break;
                }
        return edge;
}

struct octeon_irq_gpio_domain_data {
        unsigned int base_hwirq;
};

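/*
 * Translate a two-cell devicetree interrupt specifier into a hwirq
 * (the GPIO pin) and a Linux trigger type.  A sketch of the expected
 * form, assuming a pin-4, level-low source:
 *
 *     interrupts = <4 8>;
 *
 * where the second cell is 1 (rising), 2 (falling), 4 (high) or
 * 8 (low), matching the switch below.
 */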
static int octeon_irq_gpio_xlat(struct irq_domain *d,
                                struct device_node *node,
                                const u32 *intspec,
                                unsigned int intsize,
                                unsigned long *out_hwirq,
                                unsigned int *out_type)
{
        unsigned int type;
        unsigned int pin;
        unsigned int trigger;

        if (irq_domain_get_of_node(d) != node)
                return -EINVAL;

        if (intsize < 2)
                return -EINVAL;

        pin = intspec[0];
        if (pin >= 16)
                return -EINVAL;

        trigger = intspec[1];

        switch (trigger) {
        case 1:
                type = IRQ_TYPE_EDGE_RISING;
                break;
        case 2:
                type = IRQ_TYPE_EDGE_FALLING;
                break;
        case 4:
                type = IRQ_TYPE_LEVEL_HIGH;
                break;
        case 8:
                type = IRQ_TYPE_LEVEL_LOW;
                break;
        default:
                pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
                       node->name,
                       trigger);
                type = IRQ_TYPE_LEVEL_LOW;
                break;
        }
        *out_type = type;
        *out_hwirq = pin;

        return 0;
}

static int octeon_irq_ciu_xlat(struct irq_domain *d,
                               struct device_node *node,
                               const u32 *intspec,
                               unsigned int intsize,
                               unsigned long *out_hwirq,
                               unsigned int *out_type)
{
        unsigned int ciu, bit;
        struct octeon_irq_ciu_domain_data *dd = d->host_data;

        ciu = intspec[0];
        bit = intspec[1];

        if (ciu >= dd->num_sum || bit > 63)
                return -EINVAL;

        *out_hwirq = (ciu << 6) | bit;
        *out_type = 0;

        return 0;
}

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;

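/*
 * octeon_irq_ciu_to_irq[][] stores the virq in a u8, so any virq that
 * does not fit in 8 bits cannot be recorded and must be rejected.
 */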
static bool octeon_irq_virq_in_range(unsigned int virq)
{
        /* We cannot let it overflow the mapping array. */
        if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
                return true;

        WARN_ONCE(true, "virq out of range %u.\n", virq);
        return false;
}

static int octeon_irq_ciu_map(struct irq_domain *d,
                              unsigned int virq, irq_hw_number_t hw)
{
        int rv;
        unsigned int line = hw >> 6;
        unsigned int bit = hw & 63;
        struct octeon_irq_ciu_domain_data *dd = d->host_data;

        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;

        /* Don't map irq if it is reserved for GPIO. */
        if (line == 0 && bit >= 16 && bit < 32)
                return 0;

        if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;

        if (line == 2) {
                if (octeon_irq_ciu_is_edge(line, bit))
                        rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
                                &octeon_irq_chip_ciu_sum2_edge,
                                handle_edge_irq);
                else
                        rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
                                &octeon_irq_chip_ciu_sum2,
                                handle_level_irq);
        } else {
                if (octeon_irq_ciu_is_edge(line, bit))
                        rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
                                octeon_irq_ciu_chip_edge,
                                handle_edge_irq);
                else
                        rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
                                octeon_irq_ciu_chip,
                                handle_level_irq);
        }
        return rv;
}

static int octeon_irq_gpio_map(struct irq_domain *d,
                               unsigned int virq, irq_hw_number_t hw)
{
        struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
        unsigned int line, bit;
        int r;

        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;

        line = (hw + gpiod->base_hwirq) >> 6;
        bit = (hw + gpiod->base_hwirq) & 63;
        if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
                octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;

        /*
         * Default to handle_level_irq. If the DT contains a different
         * trigger type, it will call the irq_set_type callback and
         * the handler gets updated.
         */
        r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
                                       octeon_irq_gpio_chip, handle_level_irq);
        return r;
}

static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
        .map = octeon_irq_ciu_map,
        .unmap = octeon_irq_free_cd,
        .xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
        .map = octeon_irq_gpio_map,
        .unmap = octeon_irq_free_cd,
        .xlate = octeon_irq_gpio_xlat,
};

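/*
 * Low-level dispatch for the three CIU interrupt lines (IP2, IP3 and
 * IP4): read the SUM register, mask it with the enabled sources, and
 * hand the highest set bit to do_IRQ() via the mapping table.
 */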
static void octeon_irq_ip2_ciu(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

        ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
        if (likely(ciu_sum)) {
                int bit = fls64(ciu_sum) - 1;
                int irq = octeon_irq_ciu_to_irq[0][bit];
                if (likely(irq))
                        do_IRQ(irq);
                else
                        spurious_interrupt();
        } else {
                spurious_interrupt();
        }
}

static void octeon_irq_ip3_ciu(void)
{
        u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

        ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
        if (likely(ciu_sum)) {
                int bit = fls64(ciu_sum) - 1;
                int irq = octeon_irq_ciu_to_irq[1][bit];
                if (likely(irq))
                        do_IRQ(irq);
                else
                        spurious_interrupt();
        } else {
                spurious_interrupt();
        }
}

static void octeon_irq_ip4_ciu(void)
{
        int coreid = cvmx_get_core_num();
        u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
        u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));

        ciu_sum &= ciu_en;
        if (likely(ciu_sum)) {
                int bit = fls64(ciu_sum) - 1;
                int irq = octeon_irq_ciu_to_irq[2][bit];

                if (likely(irq))
                        do_IRQ(irq);
                else
                        spurious_interrupt();
        } else {
                spurious_interrupt();
        }
}

static bool octeon_irq_use_ip4;

static void octeon_irq_local_enable_ip4(void *arg)
{
        set_c0_status(STATUSF_IP4);
}

static void octeon_irq_ip4_mask(void)
{
        clear_c0_status(STATUSF_IP4);
        spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void (*octeon_irq_setup_secondary)(void);

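/*
 * Install a replacement IP4 handler and turn on the IP4 interrupt
 * line on every CPU.
 */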
void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
        octeon_irq_ip4 = h;
        octeon_irq_use_ip4 = true;
        on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}

static void octeon_irq_percpu_enable(void)
{
        irq_cpu_online();
}

static void octeon_irq_init_ciu_percpu(void)
{
        int coreid = cvmx_get_core_num();

        __this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
        __this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
        wmb();
        raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
        /*
         * Disable All CIU Interrupts. The ones we need will be
         * enabled later.  Read the SUM register so we know the write
         * completed.
         */
        cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
        cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}

static void octeon_irq_init_ciu2_percpu(void)
{
        u64 regx, ipx;
        int coreid = cvmx_get_core_num();
        u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

        /*
         * Disable All CIU2 Interrupts. The ones we need will be
         * enabled later.  Read the SUM register so we know the write
         * completed.
         *
         * There are 9 registers and 3 IPX levels with strides 0x1000
         * and 0x200 respectively.  Use loops to clear them.
         */
        for (regx = 0; regx <= 0x8000; regx += 0x1000) {
                for (ipx = 0; ipx <= 0x400; ipx += 0x200)
                        cvmx_write_csr(base + regx + ipx, 0);
        }

        cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}

static void octeon_irq_setup_secondary_ciu(void)
{
        octeon_irq_init_ciu_percpu();
        octeon_irq_percpu_enable();

        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
        if (octeon_irq_use_ip4)
                set_c0_status(STATUSF_IP4);
        else
                clear_c0_status(STATUSF_IP4);
}

static void octeon_irq_setup_secondary_ciu2(void)
{
        octeon_irq_init_ciu2_percpu();
        octeon_irq_percpu_enable();

        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
        if (octeon_irq_use_ip4)
                set_c0_status(STATUSF_IP4);
        else
                clear_c0_status(STATUSF_IP4);
}

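/*
 * Boot-time setup for the original CIU: choose between the locking
 * and the lockless (W1S/W1C) irq_chip flavours, register the irq
 * domain, and hardwire the legacy OCTEON_IRQ_* numbers to their fixed
 * CIU positions.
 */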
static int __init octeon_irq_init_ciu(
        struct device_node *ciu_node, struct device_node *parent)
{
        unsigned int i, r;
        struct irq_chip *chip;
        struct irq_chip *chip_edge;
        struct irq_chip *chip_mbox;
        struct irq_chip *chip_wd;
        struct irq_domain *ciu_domain = NULL;
        struct octeon_irq_ciu_domain_data *dd;

        dd = kzalloc(sizeof(*dd), GFP_KERNEL);
        if (!dd)
                return -ENOMEM;

        octeon_irq_init_ciu_percpu();
        octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

        octeon_irq_ip2 = octeon_irq_ip2_ciu;
        octeon_irq_ip3 = octeon_irq_ip3_ciu;
        if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
                && !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
                octeon_irq_ip4 = octeon_irq_ip4_ciu;
                dd->num_sum = 3;
                octeon_irq_use_ip4 = true;
        } else {
                octeon_irq_ip4 = octeon_irq_ip4_mask;
                dd->num_sum = 2;
                octeon_irq_use_ip4 = false;
        }
        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
            OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
                chip = &octeon_irq_chip_ciu_v2;
                chip_edge = &octeon_irq_chip_ciu_v2_edge;
                chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
                chip_wd = &octeon_irq_chip_ciu_wd_v2;
                octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
        } else {
                chip = &octeon_irq_chip_ciu;
                chip_edge = &octeon_irq_chip_ciu_edge;
                chip_mbox = &octeon_irq_chip_ciu_mbox;
                chip_wd = &octeon_irq_chip_ciu_wd;
                octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
        }
        octeon_irq_ciu_chip = chip;
        octeon_irq_ciu_chip_edge = chip_edge;

        /* MIPS internal */
        octeon_irq_init_core();

        ciu_domain = irq_domain_add_tree(
                ciu_node, &octeon_irq_domain_ciu_ops, dd);
        irq_set_default_host(ciu_domain);

        /* CIU_0 */
        for (i = 0; i < 16; i++) {
                r = octeon_irq_force_ciu_mapping(
                        ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
                if (r)
                        goto err;
        }

        r = octeon_irq_set_ciu_mapping(
                OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
        if (r)
                goto err;
        r = octeon_irq_set_ciu_mapping(
                OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
        if (r)
                goto err;

        for (i = 0; i < 4; i++) {
                r = octeon_irq_force_ciu_mapping(
                        ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
                if (r)
                        goto err;
        }
        for (i = 0; i < 4; i++) {
                r = octeon_irq_force_ciu_mapping(
                        ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
                if (r)
                        goto err;
        }

        r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
        if (r)
                goto err;

        r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
        if (r)
                goto err;

        for (i = 0; i < 4; i++) {
                r = octeon_irq_force_ciu_mapping(
                        ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
                if (r)
                        goto err;
        }

        r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
        if (r)
                goto err;

        r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
        if (r)
                goto err;

        /* CIU_1 */
        for (i = 0; i < 16; i++) {
                r = octeon_irq_set_ciu_mapping(
                        i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
                        handle_level_irq);
                if (r)
                        goto err;
        }

        r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
        if (r)
                goto err;

        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
        if (octeon_irq_use_ip4)
                set_c0_status(STATUSF_IP4);
        else
                clear_c0_status(STATUSF_IP4);

        return 0;
err:
        return r;
}
1537
1538static int __init octeon_irq_init_gpio(
1539        struct device_node *gpio_node, struct device_node *parent)
1540{
1541        struct octeon_irq_gpio_domain_data *gpiod;
1542        u32 interrupt_cells;
1543        unsigned int base_hwirq;
1544        int r;
1545
1546        r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
1547        if (r)
1548                return r;
1549
1550        if (interrupt_cells == 1) {
1551                u32 v;
1552
1553                r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
1554                if (r) {
1555                        pr_warn("No \"interrupts\" property.\n");
1556                        return r;
1557                }
1558                base_hwirq = v;
1559        } else if (interrupt_cells == 2) {
1560                u32 v0, v1;
1561
1562                r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
1563                if (r) {
1564                        pr_warn("No \"interrupts\" property.\n");
1565                        return r;
1566                }
1567                r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
1568                if (r) {
1569                        pr_warn("No \"interrupts\" property.\n");
1570                        return r;
1571                }
1572                base_hwirq = (v0 << 6) | v1;
1573        } else {
1574                pr_warn("Bad \"#interrupt-cells\" property: %u\n",
1575                        interrupt_cells);
1576                return -EINVAL;
1577        }
1578
        gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
        if (!gpiod) {
                pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
                return -ENOMEM;
        }

        /* The gpio domain's host_data carries the base hwirq number. */
        gpiod->base_hwirq = base_hwirq;
        if (!irq_domain_add_linear(gpio_node, 16,
                                   &octeon_irq_domain_gpio_ops, gpiod)) {
                pr_warn("Cannot add GPIO irq_domain.\n");
                kfree(gpiod);
                return -ENOMEM;
        }
1589
1590        return 0;
}

/*
1593 * Watchdog interrupts are special.  They are associated with a single
1594 * core, so we hardwire the affinity to that core.
1595 */
1596static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
1597{
1598        u64 mask;
1599        u64 en_addr;
1600        int coreid = data->irq - OCTEON_IRQ_WDOG0;
1601        struct octeon_ciu_chip_data *cd;
1602
1603        cd = irq_data_get_irq_chip_data(data);
1604        mask = 1ull << (cd->bit);
1605
1606        en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
1607                (0x1000ull * cd->line);
1608        cvmx_write_csr(en_addr, mask);
}
1611
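/*
 * The CIU2 exposes separate write-one-to-set (W1S) and
 * write-one-to-clear (W1C) aliases of its enable registers, so an
 * enable bit can be flipped with a single CSR write and no
 * read-modify-write locking.  The per-line register banks are spaced
 * 0x1000 bytes apart, hence the "0x1000ull * cd->line" offset below.
 */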
1612static void octeon_irq_ciu2_enable(struct irq_data *data)
1613{
1614        u64 mask;
1615        u64 en_addr;
1616        int cpu = next_cpu_for_irq(data);
1617        int coreid = octeon_coreid_for_cpu(cpu);
1618        struct octeon_ciu_chip_data *cd;
1619
1620        cd = irq_data_get_irq_chip_data(data);
1621        mask = 1ull << (cd->bit);
1622
1623        en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
1624                (0x1000ull * cd->line);
1625        cvmx_write_csr(en_addr, mask);
1626}
1627
1628static void octeon_irq_ciu2_enable_local(struct irq_data *data)
1629{
1630        u64 mask;
1631        u64 en_addr;
1632        int coreid = cvmx_get_core_num();
1633        struct octeon_ciu_chip_data *cd;
1634
1635        cd = irq_data_get_irq_chip_data(data);
1636        mask = 1ull << (cd->bit);
1637
1638        en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
1639                (0x1000ull * cd->line);
1640        cvmx_write_csr(en_addr, mask);
}
1643
1644static void octeon_irq_ciu2_disable_local(struct irq_data *data)
1645{
1646        u64 mask;
1647        u64 en_addr;
1648        int coreid = cvmx_get_core_num();
1649        struct octeon_ciu_chip_data *cd;
1650
1651        cd = irq_data_get_irq_chip_data(data);
1652        mask = 1ull << (cd->bit);
1653
1654        en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
1655                (0x1000ull * cd->line);
1656        cvmx_write_csr(en_addr, mask);
}
1659
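/*
 * Ack by writing one to the bit in the RAW register, clearing the
 * latched state.  This is only wired up (via .irq_ack) for the
 * edge-triggered CIU2 sources.
 */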
1660static void octeon_irq_ciu2_ack(struct irq_data *data)
1661{
1662        u64 mask;
1663        u64 en_addr;
1664        int coreid = cvmx_get_core_num();
1665        struct octeon_ciu_chip_data *cd;
1666
1667        cd = irq_data_get_irq_chip_data(data);
1668        mask = 1ull << (cd->bit);
1669
1670        en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
1671        cvmx_write_csr(en_addr, mask);
}
1674
1675static void octeon_irq_ciu2_disable_all(struct irq_data *data)
1676{
1677        int cpu;
1678        u64 mask;
1679        struct octeon_ciu_chip_data *cd;
1680
1681        cd = irq_data_get_irq_chip_data(data);
1682        mask = 1ull << (cd->bit);
1683
1684        for_each_online_cpu(cpu) {
1685                u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
1686                        octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
1687                cvmx_write_csr(en_addr, mask);
1688        }
1689}
1690
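/*
 * The four mailbox (IPI) interrupts live in the top bits of the IP3
 * sum register and are managed per cpu: the "all" variants walk every
 * online cpu, while the "local" variants touch only the current core.
 */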
1691static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
1692{
1693        int cpu;
1694        u64 mask;
1695
1696        mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
1697
1698        for_each_online_cpu(cpu) {
1699                u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
1700                        octeon_coreid_for_cpu(cpu));
1701                cvmx_write_csr(en_addr, mask);
1702        }
1703}
1704
1705static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
1706{
1707        int cpu;
1708        u64 mask;
1709
1710        mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
1711
1712        for_each_online_cpu(cpu) {
1713                u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
1714                        octeon_coreid_for_cpu(cpu));
1715                cvmx_write_csr(en_addr, mask);
1716        }
1717}
1718
1719static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
1720{
1721        u64 mask;
1722        u64 en_addr;
1723        int coreid = cvmx_get_core_num();
1724
1725        mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
1726        en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
1727        cvmx_write_csr(en_addr, mask);
1728}
1729
1730static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
1731{
1732        u64 mask;
1733        u64 en_addr;
1734        int coreid = cvmx_get_core_num();
1735
1736        mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
1737        en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
1738        cvmx_write_csr(en_addr, mask);
1739}
1740
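/*
 * Steer the interrupt to a single core: the enable bit is W1S-set on
 * the first online cpu found in the destination mask and W1C-cleared
 * on every other online cpu.
 */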
1741#ifdef CONFIG_SMP
1742static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
1743                                        const struct cpumask *dest, bool force)
1744{
1745        int cpu;
1746        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
1747        u64 mask;
1748        struct octeon_ciu_chip_data *cd;
1749
1750        if (!enable_one)
1751                return 0;
1752
1753        cd = irq_data_get_irq_chip_data(data);
1754        mask = 1ull << cd->bit;
1755
1756        for_each_online_cpu(cpu) {
1757                u64 en_addr;
1758                if (cpumask_test_cpu(cpu, dest) && enable_one) {
1759                        enable_one = false;
1760                        en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
1761                                octeon_coreid_for_cpu(cpu)) +
1762                                (0x1000ull * cd->line);
1763                } else {
1764                        en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
1765                                octeon_coreid_for_cpu(cpu)) +
1766                                (0x1000ull * cd->line);
1767                }
1768                cvmx_write_csr(en_addr, mask);
1769        }
1770
1771        return 0;
1772}
1773#endif
1774
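/*
 * GPIO sources ride on the normal CIU2 enable machinery but must also
 * program the per-line GPIO_BIT_CFG register; disabling tears that
 * configuration down again.
 */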
1775static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
1776{
1777        octeon_irq_gpio_setup(data);
1778        octeon_irq_ciu2_enable(data);
1779}
1780
1781static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
1782{
1783        struct octeon_ciu_chip_data *cd;
1784
1785        cd = irq_data_get_irq_chip_data(data);
1786
1787        cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
1788
1789        octeon_irq_ciu2_disable_all(data);
1790}
1791
1792static struct irq_chip octeon_irq_chip_ciu2 = {
        .name = "CIU2",
1794        .irq_enable = octeon_irq_ciu2_enable,
1795        .irq_disable = octeon_irq_ciu2_disable_all,
1796        .irq_mask = octeon_irq_ciu2_disable_local,
1797        .irq_unmask = octeon_irq_ciu2_enable,
1798#ifdef CONFIG_SMP
1799        .irq_set_affinity = octeon_irq_ciu2_set_affinity,
1800        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
1801#endif
1802};
1803
1804static struct irq_chip octeon_irq_chip_ciu2_edge = {
1805        .name = "CIU2-E",
1806        .irq_enable = octeon_irq_ciu2_enable,
1807        .irq_disable = octeon_irq_ciu2_disable_all,
1808        .irq_ack = octeon_irq_ciu2_ack,
1809        .irq_mask = octeon_irq_ciu2_disable_local,
1810        .irq_unmask = octeon_irq_ciu2_enable,
1811#ifdef CONFIG_SMP
1812        .irq_set_affinity = octeon_irq_ciu2_set_affinity,
1813        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
1814#endif
1815};
1816
1817static struct irq_chip octeon_irq_chip_ciu2_mbox = {
1818        .name = "CIU2-M",
1819        .irq_enable = octeon_irq_ciu2_mbox_enable_all,
1820        .irq_disable = octeon_irq_ciu2_mbox_disable_all,
1821        .irq_ack = octeon_irq_ciu2_mbox_disable_local,
1822        .irq_eoi = octeon_irq_ciu2_mbox_enable_local,
1823
1824        .irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
1825        .irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
1826        .flags = IRQCHIP_ONOFFLINE_ENABLED,
1827};
1828
1829static struct irq_chip octeon_irq_chip_ciu2_wd = {
1830        .name = "CIU2-W",
1831        .irq_enable = octeon_irq_ciu2_wd_enable,
1832        .irq_disable = octeon_irq_ciu2_disable_all,
1833        .irq_mask = octeon_irq_ciu2_disable_local,
1834        .irq_unmask = octeon_irq_ciu2_enable_local,
1835};
1836
1837static struct irq_chip octeon_irq_chip_ciu2_gpio = {
1838        .name = "CIU-GPIO",
1839        .irq_enable = octeon_irq_ciu2_enable_gpio,
1840        .irq_disable = octeon_irq_ciu2_disable_gpio,
1841        .irq_ack = octeon_irq_ciu_gpio_ack,
1842        .irq_mask = octeon_irq_ciu2_disable_local,
1843        .irq_unmask = octeon_irq_ciu2_enable,
1844        .irq_set_type = octeon_irq_ciu_gpio_set_type,
1845#ifdef CONFIG_SMP
1846        .irq_set_affinity = octeon_irq_ciu2_set_affinity,
1847        .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
1848#endif
1849        .flags = IRQCHIP_SET_TYPE_MASKED,
1850};
1851
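/*
 * Translate a two-cell device-tree specifier <ciu bit> into the
 * domain hwirq, using the (ciu << 6) | bit packing shared with the
 * mapping table.  For example, a (hypothetical) "interrupts = <3 8>"
 * would select CIU2 line 3 (MIO), bit 8, the first timer source.
 */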
1852static int octeon_irq_ciu2_xlat(struct irq_domain *d,
1853                                struct device_node *node,
1854                                const u32 *intspec,
1855                                unsigned int intsize,
1856                                unsigned long *out_hwirq,
1857                                unsigned int *out_type)
1858{
        unsigned int ciu, bit;

        if (intsize < 2)
                return -EINVAL;

        ciu = intspec[0];
        bit = intspec[1];
1863
1864        *out_hwirq = (ciu << 6) | bit;
1865        *out_type = 0;
1866
1867        return 0;
1868}
1869
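/*
 * Table of the CIU2 sources that are edge-triggered; everything else
 * is treated as level.  Edge sources get handle_edge_irq() and an ack
 * via the RAW register; the bit assignments mirror the hardware sum
 * registers (line 3 is MIO, line 6 is PKT).
 */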
1870static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
1871{
1872        bool edge = false;
1873
1874        if (line == 3) /* MIO */
1875                switch (bit) {
1876                case 2:  /* IPD_DRP */
1877                case 8 ... 11: /* Timers */
1878                case 48: /* PTP */
1879                        edge = true;
1880                        break;
1881                default:
1882                        break;
1883                }
1884        else if (line == 6) /* PKT */
1885                switch (bit) {
1886                case 52 ... 53: /* ILK_DRP */
1887                case 8 ... 12:  /* GMX_DRP */
1888                        edge = true;
1889                        break;
1890                default:
1891                        break;
1892                }
1893        return edge;
1894}
1895
1896static int octeon_irq_ciu2_map(struct irq_domain *d,
1897                               unsigned int virq, irq_hw_number_t hw)
1898{
1899        unsigned int line = hw >> 6;
1900        unsigned int bit = hw & 63;
1901
1902        if (!octeon_irq_virq_in_range(virq))
1903                return -EINVAL;
1904
        /*
         * Don't map the irq if it is reserved for GPIO
         * (line 7 holds the GPIO lines).
         */
1909        if (line == 7)
1910                return 0;
1911
1912        if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
1913                return -EINVAL;
1914
        if (octeon_irq_ciu2_is_edge(line, bit))
                return octeon_irq_set_ciu_mapping(virq, line, bit, 0,
                                                  &octeon_irq_chip_ciu2_edge,
                                                  handle_edge_irq);

        return octeon_irq_set_ciu_mapping(virq, line, bit, 0,
                                          &octeon_irq_chip_ciu2,
                                          handle_level_irq);
1925}
1926
static const struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
1928        .map = octeon_irq_ciu2_map,
1929        .unmap = octeon_irq_free_cd,
1930        .xlate = octeon_irq_ciu2_xlat,
1931};
1932
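/*
 * Top-level CIU2 dispatch for IP2: the per-core sum register names the
 * active line, the per-line SRC register names the active bit, and the
 * octeon_irq_ciu_to_irq[][] table converts that pair into a Linux irq
 * number; anything that cannot be resolved counts as spurious.
 */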
1933static void octeon_irq_ciu2(void)
1934{
1935        int line;
1936        int bit;
1937        int irq;
1938        u64 src_reg, src, sum;
1939        const unsigned long core_id = cvmx_get_core_num();
1940
1941        sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;
1942
1943        if (unlikely(!sum))
1944                goto spurious;
1945
1946        line = fls64(sum) - 1;
1947        src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
1948        src = cvmx_read_csr(src_reg);
1949
1950        if (unlikely(!src))
1951                goto spurious;
1952
1953        bit = fls64(src) - 1;
1954        irq = octeon_irq_ciu_to_irq[line][bit];
1955        if (unlikely(!irq))
1956                goto spurious;
1957
1958        do_IRQ(irq);
1959        goto out;
1960
1961spurious:
1962        spurious_interrupt();
1963out:
        /*
         * CN68XX pass 1.x has an erratum: accessing the ACK registers
         * can stop interrupts from propagating.
         */
1966        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1967                cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
1968        else
1969                cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
1970        return;
1971}
1972
1973static void octeon_irq_ciu2_mbox(void)
1974{
1975        int line;
1976
1977        const unsigned long core_id = cvmx_get_core_num();
1978        u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;
1979
1980        if (unlikely(!sum))
1981                goto spurious;
1982
1983        line = fls64(sum) - 1;
1984
1985        do_IRQ(OCTEON_IRQ_MBOX0 + line);
1986        goto out;
1987
1988spurious:
1989        spurious_interrupt();
1990out:
        /*
         * CN68XX pass 1.x has an erratum: accessing the ACK registers
         * can stop interrupts from propagating.
         */
1993        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1994                cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
1995        else
1996                cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
1997        return;
1998}
1999
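/*
 * Probe and initialise the CIU2: install the dispatch hooks, create
 * the irq_domain, and pre-map the fixed interrupt ranges (workqueue,
 * watchdog, timer, USB, PCI and mailbox sources).
 */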
2000static int __init octeon_irq_init_ciu2(
2001        struct device_node *ciu_node, struct device_node *parent)
2002{
        unsigned int i;
        int r;
2004        struct irq_domain *ciu_domain = NULL;
2005
2006        octeon_irq_init_ciu2_percpu();
2007        octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
2008
2009        octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
2010        octeon_irq_ip2 = octeon_irq_ciu2;
2011        octeon_irq_ip3 = octeon_irq_ciu2_mbox;
2012        octeon_irq_ip4 = octeon_irq_ip4_mask;
2013
2014        /* Mips internal */
2015        octeon_irq_init_core();
2016
        ciu_domain = irq_domain_add_tree(
                ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
        if (!ciu_domain)
                return -ENOMEM;
        irq_set_default_host(ciu_domain);
2020
        /* CIU2 */
2022        for (i = 0; i < 64; i++) {
2023                r = octeon_irq_force_ciu_mapping(
2024                        ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
2025                if (r)
2026                        goto err;
2027        }
2028
2029        for (i = 0; i < 32; i++) {
2030                r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
2031                        &octeon_irq_chip_ciu2_wd, handle_level_irq);
2032                if (r)
2033                        goto err;
2034        }
2035
2036        for (i = 0; i < 4; i++) {
2037                r = octeon_irq_force_ciu_mapping(
2038                        ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
2039                if (r)
2040                        goto err;
2041        }
2042
2043        r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
2044        if (r)
2045                goto err;
2046
2047        for (i = 0; i < 4; i++) {
2048                r = octeon_irq_force_ciu_mapping(
2049                        ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
2050                if (r)
2051                        goto err;
2052        }
2053
2054        for (i = 0; i < 4; i++) {
2055                r = octeon_irq_force_ciu_mapping(
2056                        ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
2057                if (r)
2058                        goto err;
2059        }
2060
        irq_set_chip_and_handler(OCTEON_IRQ_MBOX0,
                                 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
        irq_set_chip_and_handler(OCTEON_IRQ_MBOX1,
                                 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
        irq_set_chip_and_handler(OCTEON_IRQ_MBOX2,
                                 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
        irq_set_chip_and_handler(OCTEON_IRQ_MBOX3,
                                 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
2065
2066        /* Enable the CIU lines */
2067        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
2068        clear_c0_status(STATUSF_IP4);
2069        return 0;
2070err:
2071        return r;
2072}
2073
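/*
 * CIB blocks are simple second-level interrupt controllers: a RAW
 * (latched status) CSR plus an EN (enable) CSR, funneled into one
 * parent CIU interrupt.  The host data holds those two registers and
 * a lock serialising their read-modify-write updates; each mapped bit
 * carries its own chip data.
 */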
2074struct octeon_irq_cib_host_data {
2075        raw_spinlock_t lock;
2076        u64 raw_reg;
2077        u64 en_reg;
2078        int max_bits;
2079};
2080
2081struct octeon_irq_cib_chip_data {
2082        struct octeon_irq_cib_host_data *host_data;
2083        int bit;
2084};
2085
2086static void octeon_irq_cib_enable(struct irq_data *data)
2087{
2088        unsigned long flags;
2089        u64 en;
2090        struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
2091        struct octeon_irq_cib_host_data *host_data = cd->host_data;
2092
2093        raw_spin_lock_irqsave(&host_data->lock, flags);
2094        en = cvmx_read_csr(host_data->en_reg);
2095        en |= 1ull << cd->bit;
2096        cvmx_write_csr(host_data->en_reg, en);
2097        raw_spin_unlock_irqrestore(&host_data->lock, flags);
2098}
2099
2100static void octeon_irq_cib_disable(struct irq_data *data)
2101{
2102        unsigned long flags;
2103        u64 en;
2104        struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
2105        struct octeon_irq_cib_host_data *host_data = cd->host_data;
2106
2107        raw_spin_lock_irqsave(&host_data->lock, flags);
2108        en = cvmx_read_csr(host_data->en_reg);
2109        en &= ~(1ull << cd->bit);
2110        cvmx_write_csr(host_data->en_reg, en);
2111        raw_spin_unlock_irqrestore(&host_data->lock, flags);
2112}
2113
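/*
 * Record the requested trigger type; the edge vs. level distinction is
 * applied in the chained handler, which acks RAW bits only for
 * edge-typed interrupts.
 */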
2114static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
2115{
2116        irqd_set_trigger_type(data, t);
2117        return IRQ_SET_MASK_OK;
2118}
2119
2120static struct irq_chip octeon_irq_chip_cib = {
2121        .name = "CIB",
2122        .irq_enable = octeon_irq_cib_enable,
2123        .irq_disable = octeon_irq_cib_disable,
2124        .irq_mask = octeon_irq_cib_disable,
2125        .irq_unmask = octeon_irq_cib_enable,
2126        .irq_set_type = octeon_irq_cib_set_type,
2127};
2128
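/*
 * Translate the optional second cell of a CIB specifier into a Linux
 * trigger type: 1 means rising-edge and 4 means level-high, following
 * the usual device-tree convention; a missing or zero cell is accepted
 * and treated as level-high as well.
 */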
2129static int octeon_irq_cib_xlat(struct irq_domain *d,
2130                                   struct device_node *node,
2131                                   const u32 *intspec,
2132                                   unsigned int intsize,
2133                                   unsigned long *out_hwirq,
2134                                   unsigned int *out_type)
2135{
2136        unsigned int type = 0;
2137
2138        if (intsize == 2)
2139                type = intspec[1];
2140
2141        switch (type) {
2142        case 0: /* unofficial value, but we might as well let it work. */
2143        case 4: /* official value for level triggering. */
2144                *out_type = IRQ_TYPE_LEVEL_HIGH;
2145                break;
2146        case 1: /* official value for edge triggering. */
2147                *out_type = IRQ_TYPE_EDGE_RISING;
2148                break;
2149        default: /* Nothing else is acceptable. */
2150                return -EINVAL;
2151        }
2152
2153        *out_hwirq = intspec[0];
2154
2155        return 0;
2156}
2157
2158static int octeon_irq_cib_map(struct irq_domain *d,
2159                              unsigned int virq, irq_hw_number_t hw)
2160{
2161        struct octeon_irq_cib_host_data *host_data = d->host_data;
2162        struct octeon_irq_cib_chip_data *cd;
2163
2164        if (hw >= host_data->max_bits) {
                pr_err("ERROR: %s mapping %u is too big!\n",
2166                       irq_domain_get_of_node(d)->name, (unsigned)hw);
2167                return -EINVAL;
2168        }
2169
        cd = kzalloc(sizeof(*cd), GFP_KERNEL);
        if (!cd)
                return -ENOMEM;

        cd->host_data = host_data;
        cd->bit = hw;
2173
2174        irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
2175                                 handle_simple_irq);
2176        irq_set_chip_data(virq, cd);
2177        return 0;
2178}
2179
static const struct irq_domain_ops octeon_irq_domain_cib_ops = {
2181        .map = octeon_irq_cib_map,
2182        .unmap = octeon_irq_free_cd,
2183        .xlate = octeon_irq_cib_xlat,
2184};
2185
/*
 * Demultiplex the shared parent interrupt: for every bit that is both
 * enabled and raw-pending, chain to the real handler mapped in the CIB
 * domain.  Unmapped bits are disabled and acked so a stuck source
 * cannot storm the parent line.
 */
2187static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
2188{
2189        u64 en;
2190        u64 raw;
2191        u64 bits;
2192        int i;
2193        int irq;
2194        struct irq_domain *cib_domain = data;
2195        struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
2196
2197        en = cvmx_read_csr(host_data->en_reg);
2198        raw = cvmx_read_csr(host_data->raw_reg);
2199
2200        bits = en & raw;
2201
2202        for (i = 0; i < host_data->max_bits; i++) {
2203                if ((bits & 1ull << i) == 0)
2204                        continue;
2205                irq = irq_find_mapping(cib_domain, i);
2206                if (!irq) {
2207                        unsigned long flags;
2208
2209                        pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
2210                                i, host_data->raw_reg);
2211                        raw_spin_lock_irqsave(&host_data->lock, flags);
2212                        en = cvmx_read_csr(host_data->en_reg);
2213                        en &= ~(1ull << i);
2214                        cvmx_write_csr(host_data->en_reg, en);
2215                        cvmx_write_csr(host_data->raw_reg, 1ull << i);
2216                        raw_spin_unlock_irqrestore(&host_data->lock, flags);
2217                } else {
2218                        struct irq_desc *desc = irq_to_desc(irq);
2219                        struct irq_data *irq_data = irq_desc_get_irq_data(desc);
2220                        /* If edge, acknowledge the bit we will be sending. */
2221                        if (irqd_get_trigger_type(irq_data) &
2222                                IRQ_TYPE_EDGE_BOTH)
2223                                cvmx_write_csr(host_data->raw_reg, 1ull << i);
2224                        generic_handle_irq_desc(desc);
2225                }
2226        }
2227
2228        return IRQ_HANDLED;
2229}
2230
2231static int __init octeon_irq_init_cib(struct device_node *ciu_node,
2232                                      struct device_node *parent)
2233{
2234        const __be32 *addr;
2235        u32 val;
2236        struct octeon_irq_cib_host_data *host_data;
2237        int parent_irq;
2238        int r;
2239        struct irq_domain *cib_domain;
2240
2241        parent_irq = irq_of_parse_and_map(ciu_node, 0);
2242        if (!parent_irq) {
                pr_err("ERROR: Couldn't acquire parent_irq for %s.\n",
2244                        ciu_node->name);
2245                return -EINVAL;
2246        }
2247
        host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
        if (!host_data)
                return -ENOMEM;

        raw_spin_lock_init(&host_data->lock);
2250
2251        addr = of_get_address(ciu_node, 0, NULL, NULL);
2252        if (!addr) {
                pr_err("ERROR: Couldn't acquire reg(0) %s.\n", ciu_node->name);
2254                return -EINVAL;
2255        }
2256        host_data->raw_reg = (u64)phys_to_virt(
2257                of_translate_address(ciu_node, addr));
2258
2259        addr = of_get_address(ciu_node, 1, NULL, NULL);
2260        if (!addr) {
                pr_err("ERROR: Couldn't acquire reg(1) %s.\n", ciu_node->name);
2262                return -EINVAL;
2263        }
2264        host_data->en_reg = (u64)phys_to_virt(
2265                of_translate_address(ciu_node, addr));
2266
2267        r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
2268        if (r) {
                pr_err("ERROR: Couldn't read cavium,max-bits from %s.\n",
2270                        ciu_node->name);
2271                return r;
2272        }
2273        host_data->max_bits = val;
2274
2275        cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
2276                                           &octeon_irq_domain_cib_ops,
2277                                           host_data);
2278        if (!cib_domain) {
                pr_err("ERROR: Couldn't irq_domain_add_linear().\n");
2280                return -ENOMEM;
2281        }
2282
2283        cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
2284        cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
2285
2286        r = request_irq(parent_irq, octeon_irq_cib_handler,
2287                        IRQF_NO_THREAD, "cib", cib_domain);
2288        if (r) {
2289                pr_err("request_irq cib failed %d\n", r);
2290                return r;
2291        }
2292        pr_info("CIB interrupt controller probed: %llx %d\n",
2293                host_data->raw_reg, host_data->max_bits);
2294        return 0;
2295}
2296
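/*
 * Interrupt controllers recognised in the device tree.  of_irq_init()
 * matches these compatible strings and invokes the corresponding init
 * function, initialising parent controllers before their children.
 */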
static const struct of_device_id ciu_types[] __initconst = {
2298        {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
2299        {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
2300        {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
2301        {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
2302        {}
2303};
2304
2305void __init arch_init_irq(void)
2306{
2307#ifdef CONFIG_SMP
2308        /* Set the default affinity to the boot cpu. */
2309        cpumask_clear(irq_default_affinity);
2310        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
2311#endif
2312        of_irq_init(ciu_types);
2313}
2314
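/*
 * Main interrupt dispatch: service pending causes in priority order
 * (IP2, then IP3, then IP4, then the remaining core interrupt lines)
 * and loop until nothing is pending.
 */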
2315asmlinkage void plat_irq_dispatch(void)
2316{
2317        unsigned long cop0_cause;
2318        unsigned long cop0_status;
2319
2320        while (1) {
2321                cop0_cause = read_c0_cause();
2322                cop0_status = read_c0_status();
2323                cop0_cause &= cop0_status;
2324                cop0_cause &= ST0_IM;
2325
2326                if (cop0_cause & STATUSF_IP2)
2327                        octeon_irq_ip2();
2328                else if (cop0_cause & STATUSF_IP3)
2329                        octeon_irq_ip3();
2330                else if (cop0_cause & STATUSF_IP4)
2331                        octeon_irq_ip4();
2332                else if (cop0_cause)
2333                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
2334                else
2335                        break;
2336        }
2337}
2338
2339#ifdef CONFIG_HOTPLUG_CPU
2340
2341void octeon_fixup_irqs(void)
2342{
2343        irq_cpu_offline();
2344}
2345
2346#endif /* CONFIG_HOTPLUG_CPU */
2347