linux/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kthread.h>

#include "dpu_core_irq.h"
#include "dpu_trace.h"

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @arg:		private data of callback handler
 * @irq_idx:		interrupt index
 */
static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
{
	struct dpu_kms *dpu_kms = arg;
	struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
	struct dpu_irq_callback *cb;
	unsigned long irq_flags;

	pr_debug("irq_idx=%d\n", irq_idx);

	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
		DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
	}

	atomic_inc(&irq_obj->irq_counts[irq_idx]);

	/*
	 * Perform registered function callback
	 */
	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
		if (cb->func)
			cb->func(cb->arg, irq_idx);
	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);

	/*
	 * Clear pending interrupt status in HW.
	 * NOTE: dpu_core_irq_callback_handler is protected by top-level
	 *       spinlock, so it is safe to clear any interrupt status here.
	 */
	dpu_kms->hw_intr->ops.clear_intr_status_nolock(
			dpu_kms->hw_intr,
			irq_idx);
}

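/**
 * dpu_core_irq_idx_lookup - map an interrupt type/instance to an irq index
 * @dpu_kms:		Pointer to dpu kms context
 * @intr_type:		interrupt type, e.g. vsync of a given hw block
 * @instance_idx:	instance index of the hw block raising the interrupt
 *
 * Return: irq index on success, -EINVAL if the lookup op is unavailable
 */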
int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
		enum dpu_intr_type intr_type, u32 instance_idx)
{
	if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup)
		return -EINVAL;

	return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
			instance_idx);
}

/**
 * _dpu_core_irq_enable - enable core interrupt given by the index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 */
static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
{
	unsigned long irq_flags;
	int ret = 0, enable_count;

	if (!dpu_kms->hw_intr ||
			!dpu_kms->irq_obj.enable_counts ||
			!dpu_kms->irq_obj.irq_counts) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
	trace_dpu_core_irq_enable_idx(irq_idx, enable_count);

	if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
		ret = dpu_kms->hw_intr->ops.enable_irq(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
					irq_idx);

		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);

		spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
		/* empty callback list but interrupt is enabled */
		if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
			DPU_ERROR("irq_idx=%d enabled with no callback\n",
					irq_idx);
		spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
	}

	return ret;
}

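/**
 * dpu_core_irq_enable - enable each interrupt in the given list
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idxs:		Array of interrupt indices
 * @irq_count:		Number of entries in @irq_idxs
 *
 * Return: 0 on success, or the first error reported while enabling
 */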
int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
	int i, ret = 0, counts;

	if (!irq_idxs || !irq_count) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
	if (counts)
		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);

	for (i = 0; (i < irq_count) && !ret; i++)
		ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);

	return ret;
}

/**
 * _dpu_core_irq_disable - disable core interrupt given by the index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 */
static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
{
	int ret = 0, enable_count;

	if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
	trace_dpu_core_irq_disable_idx(irq_idx, enable_count);

	if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
		ret = dpu_kms->hw_intr->ops.disable_irq(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
					irq_idx);
		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
	}

	return ret;
}

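/**
 * dpu_core_irq_disable - disable each interrupt in the given list
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idxs:		Array of interrupt indices
 * @irq_count:		Number of entries in @irq_idxs
 *
 * Return: 0 on success, or the first error reported while disabling
 */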
int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
	int i, ret = 0, counts;

	if (!irq_idxs || !irq_count) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
	if (counts == 2)
		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);

	for (i = 0; (i < irq_count) && !ret; i++)
		ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);

	return ret;
}

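/**
 * dpu_core_irq_read - read (and optionally clear) an interrupt status
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 * @clear:		true to clear the status in HW after reading it
 *
 * Return: the interrupt status, or 0 if the index or hw ops are invalid
 */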
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
	if (!dpu_kms->hw_intr ||
			!dpu_kms->hw_intr->ops.get_interrupt_status)
		return 0;

	if (irq_idx < 0) {
		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
				__builtin_return_address(0), irq_idx);
		return 0;
	}

	return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
			irq_idx, clear);
}

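/**
 * dpu_core_irq_register_callback - register a callback for an irq index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 * @register_irq_cb:	callback to run when the interrupt fires; must remain
 *			valid until it is unregistered
 *
 * Return: 0 on success, -EINVAL on invalid parameters
 */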
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms->irq_obj.irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
	trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	list_add_tail(&register_irq_cb->list,
			&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);

	return 0;
}

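/**
 * dpu_core_irq_unregister_callback - remove a previously registered callback
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 * @register_irq_cb:	callback previously passed to
 *			dpu_core_irq_register_callback()
 *
 * Return: 0 on success, -EINVAL on invalid parameters
 */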
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms->irq_obj.irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	/* empty callback list but interrupt is still enabled */
	if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
		DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);

	return 0;
}

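/*
 * Typical client flow (an illustrative sketch only; my_vblank_cb, my_ctx
 * and intf_idx are hypothetical and error handling is omitted). A client
 * such as an encoder registers its callback before enabling the interrupt,
 * and disables the interrupt before unregistering:
 *
 *	struct dpu_irq_callback cb = {
 *		.func = my_vblank_cb,
 *		.arg = my_ctx,
 *	};
 *	int irq_idx;
 *
 *	INIT_LIST_HEAD(&cb.list);
 *	irq_idx = dpu_core_irq_idx_lookup(dpu_kms,
 *			DPU_IRQ_TYPE_INTF_VSYNC, intf_idx);
 *	dpu_core_irq_register_callback(dpu_kms, irq_idx, &cb);
 *	dpu_core_irq_enable(dpu_kms, &irq_idx, 1);
 *	...
 *	dpu_core_irq_disable(dpu_kms, &irq_idx, 1);
 *	dpu_core_irq_unregister_callback(dpu_kms, irq_idx, &cb);
 */
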
static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
{
	if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs)
		return;

	dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
}

static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs)
		return;

	dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
}

#ifdef CONFIG_DEBUG_FS
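/*
 * Generate single_open()-based file_operations named <__prefix>_fops for a
 * seq_file show function named <__prefix>_show.
 */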
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}

static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_irq *irq_obj = s->private;
	struct dpu_irq_callback *cb;
	unsigned long irq_flags;
	int i, irq_count, enable_count, cb_count;

	if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
		return 0;

	for (i = 0; i < irq_obj->total_irqs; i++) {
		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
		cb_count = 0;
		irq_count = atomic_read(&irq_obj->irq_counts[i]);
		enable_count = atomic_read(&irq_obj->enable_counts[i]);
		list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
			cb_count++;
		spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);

		if (irq_count || enable_count || cb_count)
			seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
					i, irq_count, enable_count, cb_count);
	}

	return 0;
}

DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, &dpu_kms->irq_obj,
		&dpu_debugfs_core_irq_fops);
}
#endif

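/**
 * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
 * @dpu_kms:		Pointer to dpu kms context
 *
 * Clears and disables all interrupts in HW, then allocates and resets the
 * per-index callback lists and counters used to track enable/registration
 * state.
 */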
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
{
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_all_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	spin_lock_init(&dpu_kms->irq_obj.cb_lock);

	/* Create irq callbacks for all possible irq_idx */
	dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
	dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
			sizeof(struct list_head), GFP_KERNEL);
	dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
			sizeof(atomic_t), GFP_KERNEL);
	dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
			sizeof(atomic_t), GFP_KERNEL);
	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
		INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
		atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
		atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
	}
}

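/**
 * dpu_core_irq_uninstall - uninstall core IRQ handler
 * @dpu_kms:		Pointer to dpu kms context
 *
 * Warns about any interrupt still enabled or callback still registered,
 * then clears and disables everything in HW and frees the tracking tables.
 */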
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
		if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
				!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

	dpu_clear_all_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	kfree(dpu_kms->irq_obj.irq_cb_tbl);
	kfree(dpu_kms->irq_obj.enable_counts);
	kfree(dpu_kms->irq_obj.irq_counts);
	dpu_kms->irq_obj.irq_cb_tbl = NULL;
	dpu_kms->irq_obj.enable_counts = NULL;
	dpu_kms->irq_obj.irq_counts = NULL;
	dpu_kms->irq_obj.total_irqs = 0;
}

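/**
 * dpu_core_irq - top-level core interrupt handler
 * @dpu_kms:		Pointer to dpu kms context
 *
 * Called from the device's interrupt handler to collect and dispatch all
 * pending core interrupts.
 *
 * Return: IRQ_HANDLED
 */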
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
{
	/*
	 * Read the interrupt status from all sources. The statuses are
	 * stored within hw_intr and are cleared in HW as they are read.
	 * An individual interrupt status bit is only stored if that
	 * interrupt is enabled.
	 */
	dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);

	/*
	 * Dispatch the interrupts that fired to the HW driver for lookup.
	 * For each matching interrupt, the HW driver calls
	 * dpu_core_irq_callback_handler with the irq_idx from the lookup
	 * table. dpu_core_irq_callback_handler runs the registered
	 * callbacks and clears the interrupt status once they finish.
	 */
	dpu_kms->hw_intr->ops.dispatch_irqs(
			dpu_kms->hw_intr,
			dpu_core_irq_callback_handler,
			dpu_kms);

	return IRQ_HANDLED;
}