linux/drivers/soc/fsl/qe/qe_ic.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/sysdev/qe_lib/qe_ic.c
 *
 * Copyright (C) 2006 Freescale Semiconductor, Inc.  All rights reserved.
 *
 * Author: Li Yang <leoli@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * QUICC ENGINE Interrupt Controller
 */

#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/qe.h>

#define NR_QE_IC_INTS           64

/* QE IC register offsets */
#define QEIC_CICR               0x00
#define QEIC_CIVEC              0x04
#define QEIC_CIPXCC             0x10
#define QEIC_CIPYCC             0x14
#define QEIC_CIPWCC             0x18
#define QEIC_CIPZCC             0x1c
#define QEIC_CIMR               0x20
#define QEIC_CRIMR              0x24
#define QEIC_CIPRTA             0x30
#define QEIC_CIPRTB             0x34
#define QEIC_CHIVEC             0x60

struct qe_ic {
        /* Control registers offset */
        __be32 __iomem *regs;

        /* The remapper for this QEIC */
        struct irq_domain *irqhost;

        /* The "linux" controller struct */
        struct irq_chip hc_irq;

        /* VIRQ numbers of QE high/low irqs */
        int virq_high;
        int virq_low;
};

/*
 * QE interrupt controller internal structure
 */
struct qe_ic_info {
        /* Location of this source at the QIMR register */
        u32     mask;

        /* Mask register offset */
        u32     mask_reg;

        /*
         * For grouped interrupt sources - the interrupt code as it
         * appears in the group priority register
         */
        u8      pri_code;

        /* Group priority register offset */
        u32     pri_reg;
};

static DEFINE_RAW_SPINLOCK(qe_ic_lock);

static struct qe_ic_info qe_ic_info[] = {
        [1] = {
               .mask = 0x00008000,
               .mask_reg = QEIC_CIMR,
               .pri_code = 0,
               .pri_reg = QEIC_CIPWCC,
               },
        [2] = {
               .mask = 0x00004000,
               .mask_reg = QEIC_CIMR,
               .pri_code = 1,
               .pri_reg = QEIC_CIPWCC,
               },
        [3] = {
               .mask = 0x00002000,
               .mask_reg = QEIC_CIMR,
               .pri_code = 2,
               .pri_reg = QEIC_CIPWCC,
               },
        [10] = {
                .mask = 0x00000040,
                .mask_reg = QEIC_CIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPZCC,
                },
        [11] = {
                .mask = 0x00000020,
                .mask_reg = QEIC_CIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPZCC,
                },
        [12] = {
                .mask = 0x00000010,
                .mask_reg = QEIC_CIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPZCC,
                },
        [13] = {
                .mask = 0x00000008,
                .mask_reg = QEIC_CIMR,
                .pri_code = 4,
                .pri_reg = QEIC_CIPZCC,
                },
        [14] = {
                .mask = 0x00000004,
                .mask_reg = QEIC_CIMR,
                .pri_code = 5,
                .pri_reg = QEIC_CIPZCC,
                },
        [15] = {
                .mask = 0x00000002,
                .mask_reg = QEIC_CIMR,
                .pri_code = 6,
                .pri_reg = QEIC_CIPZCC,
                },
        [20] = {
                .mask = 0x10000000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPRTA,
                },
        [25] = {
                .mask = 0x00800000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 0,
                .pri_reg = QEIC_CIPRTB,
                },
        [26] = {
                .mask = 0x00400000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPRTB,
                },
        [27] = {
                .mask = 0x00200000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPRTB,
                },
        [28] = {
                .mask = 0x00100000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPRTB,
                },
        [32] = {
                .mask = 0x80000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 0,
                .pri_reg = QEIC_CIPXCC,
                },
        [33] = {
                .mask = 0x40000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPXCC,
                },
        [34] = {
                .mask = 0x20000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPXCC,
                },
        [35] = {
                .mask = 0x10000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPXCC,
                },
        [36] = {
                .mask = 0x08000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 4,
                .pri_reg = QEIC_CIPXCC,
                },
        [40] = {
                .mask = 0x00800000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 0,
                .pri_reg = QEIC_CIPYCC,
                },
        [41] = {
                .mask = 0x00400000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPYCC,
                },
        [42] = {
                .mask = 0x00200000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPYCC,
                },
        [43] = {
                .mask = 0x00100000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPYCC,
                },
};

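/*
 * Note on the table above: qe_ic_info[] is deliberately sparse.  Source
 * numbers that are not listed stay zero-initialized, so their .mask is 0
 * and qe_ic_host_map() below refuses to map them as reserved.  Sources
 * 44..63 fall outside the table entirely and are rejected by the
 * ARRAY_SIZE() check there, even though the irq domain itself is sized
 * for NR_QE_IC_INTS.
 */
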
static inline u32 qe_ic_read(__be32  __iomem *base, unsigned int reg)
{
        return ioread32be(base + (reg >> 2));
}

static inline void qe_ic_write(__be32  __iomem *base, unsigned int reg,
                               u32 value)
{
        iowrite32be(value, base + (reg >> 2));
}

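/*
 * The QEIC_* values above are byte offsets into the register block, while
 * 'base' is a __be32 pointer, so the accessors shift the offset right by
 * two to turn it into a word index.  For example,
 * qe_ic_read(qe_ic->regs, QEIC_CIMR) reads the big-endian word at byte
 * offset 0x20, i.e. regs[8].
 */
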
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
        return irq_get_chip_data(virq);
}

static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
{
        return irq_data_get_irq_chip_data(d);
}

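/*
 * Masking and unmasking are read-modify-write sequences on the shared
 * CIMR/CRIMR registers, so both paths take the single global qe_ic_lock
 * to keep concurrent updates for different sources from clobbering each
 * other.
 */
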
static void qe_ic_unmask_irq(struct irq_data *d)
{
        struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&qe_ic_lock, flags);

        temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
        qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
                    temp | qe_ic_info[src].mask);

        raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
}

static void qe_ic_mask_irq(struct irq_data *d)
{
        struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&qe_ic_lock, flags);

        temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
        qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
                    temp & ~qe_ic_info[src].mask);

        /* Flush the above write before enabling interrupts; otherwise,
         * spurious interrupts will sometimes happen.  To be 100% sure
         * that the write has reached the device before interrupts are
         * enabled, the mask register would have to be read back; however,
         * this is not required for correctness, only to avoid wasting
         * time on a large number of spurious interrupts.  In testing,
         * a sync reduced the observed spurious interrupts to zero.
         */
        mb();

        raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
}

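/*
 * There is no separate acknowledge callback: .irq_mask_ack points at
 * qe_ic_mask_irq as well, so masking the source is all that is done to
 * "ack" a level interrupt here.
 */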
static struct irq_chip qe_ic_irq_chip = {
        .name = "QEIC",
        .irq_unmask = qe_ic_unmask_irq,
        .irq_mask = qe_ic_mask_irq,
        .irq_mask_ack = qe_ic_mask_irq,
};

static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
                            enum irq_domain_bus_token bus_token)
{
        /* Exact match, unless qe_ic node is NULL */
        struct device_node *of_node = irq_domain_get_of_node(h);
        return of_node == NULL || of_node == node;
}

static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        struct qe_ic *qe_ic = h->host_data;
        struct irq_chip *chip;

        if (hw >= ARRAY_SIZE(qe_ic_info)) {
                pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
                return -EINVAL;
        }

        if (qe_ic_info[hw].mask == 0) {
                printk(KERN_ERR "Can't map reserved IRQ\n");
                return -EINVAL;
        }
        /* Default chip */
        chip = &qe_ic->hc_irq;

        irq_set_chip_data(virq, qe_ic);
        irq_set_status_flags(virq, IRQ_LEVEL);

        irq_set_chip_and_handler(virq, chip, handle_level_irq);

        return 0;
}

static const struct irq_domain_ops qe_ic_host_ops = {
        .match = qe_ic_host_match,
        .map = qe_ic_host_map,
        .xlate = irq_domain_xlate_onetwocell,
};

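/*
 * With .xlate = irq_domain_xlate_onetwocell, a consumer's interrupt
 * specifier may be either one cell (just the QE source number) or two
 * cells (source number plus a type cell).  A hypothetical consumer node
 * would therefore look roughly like:
 *
 *      interrupt-parent = <&qeic>;
 *      interrupts = <32>;
 *
 * where 32 is one of the source numbers listed in qe_ic_info[] above.
 */
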
/* Return an interrupt vector or 0 if no interrupt is pending. */
static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{
        int irq;

        BUG_ON(qe_ic == NULL);

        /* get the interrupt source vector. */
        irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;

        if (irq == 0)
                return 0;

        return irq_linear_revmap(qe_ic->irqhost, irq);
}

/* Return an interrupt vector or 0 if no interrupt is pending. */
static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{
        int irq;

        BUG_ON(qe_ic == NULL);

        /* get the interrupt source vector. */
        irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;

        if (irq == 0)
                return 0;

        return irq_linear_revmap(qe_ic->irqhost, irq);
}

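/*
 * Both helpers above take the six most-significant bits of the vector
 * register as the pending source number, hence the shift by 26: a
 * CIVEC/CHIVEC value of 0x80000000, for instance, decodes to source 32
 * (0x80000000 >> 26 == 32), which is then looked up in the linear revmap.
 */
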
static void qe_ic_cascade_low(struct irq_desc *desc)
{
        struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
        unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
        struct irq_chip *chip = irq_desc_get_chip(desc);

        if (cascade_irq != 0)
                generic_handle_irq(cascade_irq);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}

static void qe_ic_cascade_high(struct irq_desc *desc)
{
        struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
        unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
        struct irq_chip *chip = irq_desc_get_chip(desc);

        if (cascade_irq != 0)
                generic_handle_irq(cascade_irq);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}

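/*
 * Used instead of the two handlers above when the high and low QE outputs
 * are cascaded to the same parent interrupt (see qe_ic_init()): the high
 * vector is polled first, the low vector is the fallback, and the parent
 * is unconditionally EOI'd.
 */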
static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
{
        struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
        unsigned int cascade_irq;
        struct irq_chip *chip = irq_desc_get_chip(desc);

        cascade_irq = qe_ic_get_high_irq(qe_ic);
        if (cascade_irq == 0)
                cascade_irq = qe_ic_get_low_irq(qe_ic);

        if (cascade_irq != 0)
                generic_handle_irq(cascade_irq);

        chip->irq_eoi(&desc->irq_data);
}

static int qe_ic_init(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        void (*low_handler)(struct irq_desc *desc);
        void (*high_handler)(struct irq_desc *desc);
        struct qe_ic *qe_ic;
        struct resource *res;
        struct device_node *node = pdev->dev.of_node;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(dev, "no memory resource defined\n");
                return -ENODEV;
        }

        qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL);
        if (qe_ic == NULL)
                return -ENOMEM;

        qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res));
        if (qe_ic->regs == NULL) {
                dev_err(dev, "failed to ioremap() registers\n");
                return -ENODEV;
        }

        qe_ic->hc_irq = qe_ic_irq_chip;

        qe_ic->virq_high = platform_get_irq(pdev, 0);
        qe_ic->virq_low = platform_get_irq(pdev, 1);

        if (qe_ic->virq_low <= 0)
                return -ENODEV;

        if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) {
                low_handler = qe_ic_cascade_low;
                high_handler = qe_ic_cascade_high;
        } else {
                low_handler = qe_ic_cascade_muxed_mpic;
                high_handler = NULL;
        }

        qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
                                               &qe_ic_host_ops, qe_ic);
        if (qe_ic->irqhost == NULL) {
                dev_err(dev, "failed to add irq domain\n");
                return -ENODEV;
        }

        qe_ic_write(qe_ic->regs, QEIC_CICR, 0);

        irq_set_handler_data(qe_ic->virq_low, qe_ic);
        irq_set_chained_handler(qe_ic->virq_low, low_handler);

        if (high_handler) {
                irq_set_handler_data(qe_ic->virq_high, qe_ic);
                irq_set_chained_handler(qe_ic->virq_high, high_handler);
        }
        return 0;
}

static const struct of_device_id qe_ic_ids[] = {
        { .compatible = "fsl,qe-ic"},
        { .type = "qeic"},
        {},
};

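/*
 * Illustrative only - not taken from a real board file: a device tree
 * node matched by the "fsl,qe-ic" entry above is expected to provide the
 * register window and the one or two parent interrupts consumed by
 * qe_ic_init(), along the lines of:
 *
 *      qeic: interrupt-controller@80 {
 *              compatible = "fsl,qe-ic";
 *              interrupt-controller;
 *              #interrupt-cells = <1>;
 *              reg = <0x80 0x80>;
 *              interrupts = <32 8 33 8>;  // high, low; values are SoC-specific
 *      };
 */
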
static struct platform_driver qe_ic_driver =
{
        .driver = {
                .name           = "qe-ic",
                .of_match_table = qe_ic_ids,
        },
        .probe  = qe_ic_init,
};

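/*
 * The driver is registered from a subsys_initcall rather than via
 * module_platform_driver(), presumably so that the interrupt controller
 * is set up before the QE peripheral drivers that depend on it start
 * probing.
 */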
static int __init qe_ic_of_init(void)
{
        platform_driver_register(&qe_ic_driver);
        return 0;
}
subsys_initcall(qe_ic_of_init);