linux/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/slab.h>

#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"

/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_SSPP_TOP0_OFF               0x0
#define MDP_INTF_0_OFF                  0x6A000
#define MDP_INTF_1_OFF                  0x6A800
#define MDP_INTF_2_OFF                  0x6B000
#define MDP_INTF_3_OFF                  0x6B800
#define MDP_INTF_4_OFF                  0x6C000
#define MDP_AD4_0_OFF                   0x7C000
#define MDP_AD4_1_OFF                   0x7D000
#define MDP_AD4_INTR_EN_OFF             0x41c
#define MDP_AD4_INTR_CLEAR_OFF          0x424
#define MDP_AD4_INTR_STATUS_OFF         0x420
#define MDP_INTF_0_OFF_REV_7xxx         0x34000
#define MDP_INTF_1_OFF_REV_7xxx         0x35000
#define MDP_INTF_5_OFF_REV_7xxx         0x39000

/**
 * struct dpu_intr_reg - array of DPU register sets
 * @clr_off:    offset to CLEAR reg
 * @en_off:     offset to ENABLE reg
 * @status_off: offset to STATUS reg
 */
struct dpu_intr_reg {
        u32 clr_off;
        u32 en_off;
        u32 status_off;
};

/*
 * dpu_intr_set - List of DPU interrupt registers
 *
 * When making changes be sure to sync with dpu_hw_intr_reg
 */
static const struct dpu_intr_reg dpu_intr_set[] = {
        {
                MDP_SSPP_TOP0_OFF + INTR_CLEAR,
                MDP_SSPP_TOP0_OFF + INTR_EN,
                MDP_SSPP_TOP0_OFF + INTR_STATUS
        },
        {
                MDP_SSPP_TOP0_OFF + INTR2_CLEAR,
                MDP_SSPP_TOP0_OFF + INTR2_EN,
                MDP_SSPP_TOP0_OFF + INTR2_STATUS
        },
        {
                MDP_SSPP_TOP0_OFF + HIST_INTR_CLEAR,
                MDP_SSPP_TOP0_OFF + HIST_INTR_EN,
                MDP_SSPP_TOP0_OFF + HIST_INTR_STATUS
        },
        {
                MDP_INTF_0_OFF + INTF_INTR_CLEAR,
                MDP_INTF_0_OFF + INTF_INTR_EN,
                MDP_INTF_0_OFF + INTF_INTR_STATUS
        },
        {
                MDP_INTF_1_OFF + INTF_INTR_CLEAR,
                MDP_INTF_1_OFF + INTF_INTR_EN,
                MDP_INTF_1_OFF + INTF_INTR_STATUS
        },
        {
                MDP_INTF_2_OFF + INTF_INTR_CLEAR,
                MDP_INTF_2_OFF + INTF_INTR_EN,
                MDP_INTF_2_OFF + INTF_INTR_STATUS
        },
        {
                MDP_INTF_3_OFF + INTF_INTR_CLEAR,
                MDP_INTF_3_OFF + INTF_INTR_EN,
                MDP_INTF_3_OFF + INTF_INTR_STATUS
        },
        {
                MDP_INTF_4_OFF + INTF_INTR_CLEAR,
                MDP_INTF_4_OFF + INTF_INTR_EN,
                MDP_INTF_4_OFF + INTF_INTR_STATUS
        },
        {
                MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
                MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
                MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
        },
        {
                MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
                MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
                MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
        },
        {
                MDP_INTF_0_OFF_REV_7xxx + INTF_INTR_CLEAR,
                MDP_INTF_0_OFF_REV_7xxx + INTF_INTR_EN,
                MDP_INTF_0_OFF_REV_7xxx + INTF_INTR_STATUS
        },
        {
                MDP_INTF_1_OFF_REV_7xxx + INTF_INTR_CLEAR,
                MDP_INTF_1_OFF_REV_7xxx + INTF_INTR_EN,
                MDP_INTF_1_OFF_REV_7xxx + INTF_INTR_STATUS
        },
        {
                MDP_INTF_5_OFF_REV_7xxx + INTF_INTR_CLEAR,
                MDP_INTF_5_OFF_REV_7xxx + INTF_INTR_EN,
                MDP_INTF_5_OFF_REV_7xxx + INTF_INTR_STATUS
        },
};

#define DPU_IRQ_REG(irq_idx)    ((irq_idx) / 32)
#define DPU_IRQ_MASK(irq_idx)   (BIT((irq_idx) % 32))

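/*
 * Illustration (comment only, not compiled): a flat irq index packs a
 * register-set index and a bit position, so the two macros above invert
 * DPU_IRQ_IDX() from the header. For example, with register set 2 (the
 * HIST entry in dpu_intr_set) and bit 5:
 *
 *      irq_idx = DPU_IRQ_IDX(2, 5);    // 2 * 32 + 5 = 69
 *      DPU_IRQ_REG(irq_idx);           // 69 / 32 = 2  (register set)
 *      DPU_IRQ_MASK(irq_idx);          // BIT(69 % 32) = BIT(5)
 */
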
/* Clear the pending bit for @irq_idx; the caller holds intr->irq_lock. */
static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
                int irq_idx)
{
        int reg_idx;

        if (!intr)
                return;

        reg_idx = DPU_IRQ_REG(irq_idx);
        DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off, DPU_IRQ_MASK(irq_idx));

        /* ensure register writes go through */
        wmb();
}

static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
                void (*cbfunc)(void *, int),
                void *arg)
{
        int reg_idx;
        int irq_idx;
        u32 irq_status;
        u32 enable_mask;
        int bit;
        unsigned long irq_flags;

        if (!intr)
                return;

        /*
         * The dispatcher saves the IRQ status before calling here. Walk
         * each enabled register set and map every asserted status bit
         * back to its flat irq lookup index.
         */
        spin_lock_irqsave(&intr->irq_lock, irq_flags);
        for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
                if (!test_bit(reg_idx, &intr->irq_mask))
                        continue;

                /* Read interrupt status */
                irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);

                /* Read enable mask */
                enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);

                /* and clear the interrupt */
                if (irq_status)
                        DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
                                     irq_status);

                /* Finally update IRQ status based on enable mask */
                irq_status &= enable_mask;

                if (!irq_status)
                        continue;

                /* Walk the asserted status bits. */
                while ((bit = ffs(irq_status)) != 0) {
                        irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

                        /*
                         * Invoke the callback, if one was given, with the
                         * matching irq index, then ack that interrupt.
                         */
                        if (cbfunc)
                                cbfunc(arg, irq_idx);

                        dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);

                        /*
                         * Drop the handled bit from the local status copy;
                         * the search stops once irq_status is all clear.
                         */
                        irq_status &= ~BIT(bit - 1);
                }
        }

        /* ensure register writes go through */
        wmb();

        spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
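
/*
 * A sketch of how a top-level handler might consume dispatch_irqs
 * (illustrative only; the actual caller lives in the DPU core irq
 * code, and dpu_irq_cb/priv are hypothetical names):
 *
 *      static void dpu_irq_cb(void *priv, int irq_idx)
 *      {
 *              // run the handlers registered for this flat irq_idx
 *      }
 *
 *      intr->ops.dispatch_irqs(intr, dpu_irq_cb, priv);
 */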

static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
        int reg_idx;
        const struct dpu_intr_reg *reg;
        const char *dbgstr = NULL;
        uint32_t cache_irq_mask;

        if (!intr)
                return -EINVAL;

        if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
                pr_err("invalid IRQ index: [%d]\n", irq_idx);
                return -EINVAL;
        }

        /*
         * The cache_irq_mask and hardware RMW operations need to be done
         * under irq_lock; it is the caller's responsibility to ensure the
         * lock is held.
         */
        assert_spin_locked(&intr->irq_lock);

        reg_idx = DPU_IRQ_REG(irq_idx);
        reg = &dpu_intr_set[reg_idx];

        cache_irq_mask = intr->cache_irq_mask[reg_idx];
        if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
                dbgstr = "DPU IRQ already set:";
        } else {
                dbgstr = "DPU IRQ enabled:";

                cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
                /* Clear any pending interrupt */
                DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
                /* Enable interrupts with the new mask */
                DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

                /* ensure register write goes through */
                wmb();

                intr->cache_irq_mask[reg_idx] = cache_irq_mask;
        }

        pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
                        DPU_IRQ_MASK(irq_idx), cache_irq_mask);

        return 0;
}

static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
        int reg_idx;
        const struct dpu_intr_reg *reg;
        const char *dbgstr = NULL;
        uint32_t cache_irq_mask;

        if (!intr)
                return -EINVAL;

        if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
                pr_err("invalid IRQ index: [%d]\n", irq_idx);
                return -EINVAL;
        }

        /*
         * The cache_irq_mask and hardware RMW operations need to be done
         * under irq_lock; it is the caller's responsibility to ensure the
         * lock is held.
         */
        assert_spin_locked(&intr->irq_lock);

        reg_idx = DPU_IRQ_REG(irq_idx);
        reg = &dpu_intr_set[reg_idx];

        cache_irq_mask = intr->cache_irq_mask[reg_idx];
        if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
                dbgstr = "DPU IRQ is already cleared:";
        } else {
                dbgstr = "DPU IRQ mask disable:";

                cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
                /* Disable interrupts based on the new mask */
                DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
                /* Clear any pending interrupt */
                DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

                /* ensure register write goes through */
                wmb();

                intr->cache_irq_mask[reg_idx] = cache_irq_mask;
        }

        pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
                        DPU_IRQ_MASK(irq_idx), cache_irq_mask);

        return 0;
}
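
/*
 * The *_irq_locked() helpers above intentionally do no locking; a
 * caller is expected to bracket them with the lock/unlock ops, as
 * assert_spin_locked() enforces. A minimal sketch:
 *
 *      unsigned long irq_flags = intr->ops.lock(intr);
 *
 *      intr->ops.enable_irq_locked(intr, irq_idx);
 *      ...
 *      intr->ops.disable_irq_locked(intr, irq_idx);
 *
 *      intr->ops.unlock(intr, irq_flags);
 */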

static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
{
        int i;

        if (!intr)
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
                if (test_bit(i, &intr->irq_mask))
                        DPU_REG_WRITE(&intr->hw,
                                        dpu_intr_set[i].clr_off, 0xffffffff);
        }

        /* ensure register writes go through */
        wmb();

        return 0;
}

static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
{
        int i;

        if (!intr)
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
                if (test_bit(i, &intr->irq_mask))
                        DPU_REG_WRITE(&intr->hw,
                                        dpu_intr_set[i].en_off, 0x00000000);
        }

        /* ensure register writes go through */
        wmb();

        return 0;
}
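
/*
 * Sketch of a typical quiesce sequence built from the two helpers
 * above (an assumption from the op names; the actual call sites are
 * in the core irq setup/teardown paths):
 *
 *      intr->ops.clear_all_irqs(intr);         // ack anything pending
 *      intr->ops.disable_all_irqs(intr);       // known-quiet starting state
 */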

static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
                int irq_idx, bool clear)
{
        int reg_idx;
        unsigned long irq_flags;
        u32 intr_status;

        if (!intr)
                return 0;

        if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
                pr_err("invalid IRQ index: [%d]\n", irq_idx);
                return 0;
        }

        spin_lock_irqsave(&intr->irq_lock, irq_flags);

        reg_idx = DPU_IRQ_REG(irq_idx);
        intr_status = DPU_REG_READ(&intr->hw,
                        dpu_intr_set[reg_idx].status_off) &
                DPU_IRQ_MASK(irq_idx);
        if (intr_status && clear)
                DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
                                intr_status);

        /* ensure register writes go through */
        wmb();

        spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

        return intr_status;
}
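
/*
 * Example (illustrative): read-and-ack a single interrupt bit, e.g.
 * when polling for an event instead of waiting in the ISR path. A
 * non-zero return means the bit was set and, with clear == true, it
 * has already been acked:
 *
 *      if (intr->ops.get_interrupt_status(intr, irq_idx, true))
 *              handle_event();         // hypothetical helper
 */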

static unsigned long dpu_hw_intr_lock(struct dpu_hw_intr *intr)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&intr->irq_lock, irq_flags);

        return irq_flags;
}

static void dpu_hw_intr_unlock(struct dpu_hw_intr *intr, unsigned long irq_flags)
{
        spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}

static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
{
        ops->enable_irq_locked = dpu_hw_intr_enable_irq_locked;
        ops->disable_irq_locked = dpu_hw_intr_disable_irq_locked;
        ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
        ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
        ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
        ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
        ops->lock = dpu_hw_intr_lock;
        ops->unlock = dpu_hw_intr_unlock;
}

static void __intr_offset(struct dpu_mdss_cfg *m,
                void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
        hw->base_off = addr;
        hw->blk_off = m->mdp[0].base;
        hw->hwversion = m->hwversion;
}

struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
                struct dpu_mdss_cfg *m)
{
        struct dpu_hw_intr *intr;

        if (!addr || !m)
                return ERR_PTR(-EINVAL);

        intr = kzalloc(sizeof(*intr), GFP_KERNEL);
        if (!intr)
                return ERR_PTR(-ENOMEM);

        __intr_offset(m, addr, &intr->hw);
        __setup_intr_ops(&intr->ops);

        intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;

        intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
                        GFP_KERNEL);
        if (!intr->cache_irq_mask) {
                kfree(intr);
                return ERR_PTR(-ENOMEM);
        }

        intr->irq_mask = m->mdss_irqs;

        spin_lock_init(&intr->irq_lock);

        return intr;
}
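
/*
 * Typical lifetime (sketch, using only what this file provides): map
 * the MDSS region, initialize, and pair with dpu_hw_intr_destroy() on
 * teardown. mmio and catalog are placeholder names:
 *
 *      struct dpu_hw_intr *intr = dpu_hw_intr_init(mmio, catalog);
 *
 *      if (IS_ERR(intr))
 *              return PTR_ERR(intr);
 *      ...
 *      dpu_hw_intr_destroy(intr);
 */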

void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
        if (intr) {
                kfree(intr->cache_irq_mask);
                kfree(intr);
        }
}